/* Copyright (c) 2016-2017, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/**
 * \file hs_client.c
 * \brief Implement next generation hidden service client functionality
 **/

#define HS_CLIENT_PRIVATE

#include "or.h"
#include "hs_circuit.h"
#include "hs_ident.h"
#include "connection_edge.h"
#include "container.h"
#include "rendclient.h"
#include "hs_descriptor.h"
#include "hs_cache.h"
#include "hs_cell.h"
#include "config.h"
#include "directory.h"
#include "hs_client.h"
#include "hs_control.h"
#include "router.h"
#include "routerset.h"
#include "circuitlist.h"
#include "circuituse.h"
#include "connection.h"
#include "nodelist.h"
#include "circpathbias.h"
#include "hs_ntor.h"
#include "circuitbuild.h"
#include "networkstatus.h"
#include "reasons.h"

/* Return a human-readable string for the client fetch status code. */
static const char *
fetch_status_to_string(hs_client_fetch_status_t status)
{
  switch (status) {
  case HS_CLIENT_FETCH_ERROR:
    return "Internal error";
  case HS_CLIENT_FETCH_LAUNCHED:
    return "Descriptor fetch launched";
  case HS_CLIENT_FETCH_HAVE_DESC:
    return "Already have descriptor";
  case HS_CLIENT_FETCH_NO_HSDIRS:
    return "No more HSDir available to query";
  case HS_CLIENT_FETCH_NOT_ALLOWED:
    return "Fetching descriptors is not allowed";
  case HS_CLIENT_FETCH_MISSING_INFO:
    return "Missing directory information";
  case HS_CLIENT_FETCH_PENDING:
    return "Pending descriptor fetch";
  default:
    return "(Unknown client fetch status code)";
  }
}

/* Return true iff tor should close the SOCKS request(s) for the descriptor
 * fetch that ended up with this given status code. */
static int
fetch_status_should_close_socks(hs_client_fetch_status_t status)
{
  switch (status) {
  case HS_CLIENT_FETCH_NO_HSDIRS:
    /* No more HSDir to query, we can't complete the SOCKS request(s). */
  case HS_CLIENT_FETCH_ERROR:
    /* The fetch triggered an internal error. */
  case HS_CLIENT_FETCH_NOT_ALLOWED:
    /* Client is not allowed to fetch (FetchHidServDescriptors 0). */
    goto close;
  case HS_CLIENT_FETCH_MISSING_INFO:
  case HS_CLIENT_FETCH_HAVE_DESC:
  case HS_CLIENT_FETCH_PENDING:
  case HS_CLIENT_FETCH_LAUNCHED:
    /* The rest doesn't require tor to close the SOCKS request(s). */
    goto no_close;
  }

 no_close:
  return 0;
 close:
  return 1;
}

/* Cancel all descriptor fetches currently in progress. */
static void
cancel_descriptor_fetches(void)
{
  smartlist_t *conns =
    connection_list_by_type_purpose(CONN_TYPE_DIR, DIR_PURPOSE_FETCH_HSDESC);
  SMARTLIST_FOREACH_BEGIN(conns, connection_t *, conn) {
    const hs_ident_dir_conn_t *ident = TO_DIR_CONN(conn)->hs_ident;
    if (BUG(ident == NULL)) {
      /* A directory connection fetching a service descriptor can't have an
       * empty hidden service identifier. */
      continue;
    }
    log_debug(LD_REND, "Marking for close a directory connection fetching "
              "a hidden service descriptor for service %s.",
              safe_str_client(ed25519_fmt(&ident->identity_pk)));
    connection_mark_for_close(conn);
  } SMARTLIST_FOREACH_END(conn);

  /* No ownership of the objects in this list. */
  smartlist_free(conns);
  log_info(LD_REND, "Hidden service client descriptor fetches cancelled.");
}

/* Get all connections that are waiting on a circuit and flag them back to
 * waiting for a hidden service descriptor for the given service key
 * service_identity_pk. */
static void
flag_all_conn_wait_desc(const ed25519_public_key_t *service_identity_pk)
{
  tor_assert(service_identity_pk);

  smartlist_t *conns =
    connection_list_by_type_state(CONN_TYPE_AP, AP_CONN_STATE_CIRCUIT_WAIT);

  SMARTLIST_FOREACH_BEGIN(conns, connection_t *, conn) {
    edge_connection_t *edge_conn;
    if (BUG(!CONN_IS_EDGE(conn))) {
      continue;
    }
    edge_conn = TO_EDGE_CONN(conn);
    if (edge_conn->hs_ident &&
        ed25519_pubkey_eq(&edge_conn->hs_ident->identity_pk,
                          service_identity_pk)) {
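      /* This connection was waiting for a circuit; stop tracking it as
       * pending and send it back to waiting for a descriptor. */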
      connection_ap_mark_as_non_pending_circuit(TO_ENTRY_CONN(conn));
      conn->state = AP_CONN_STATE_RENDDESC_WAIT;
    }
  } SMARTLIST_FOREACH_END(conn);

  smartlist_free(conns);
}

/* Remove tracked HSDir requests from our history for this hidden service
 * identity public key. */
static void
purge_hid_serv_request(const ed25519_public_key_t *identity_pk)
{
  char base64_blinded_pk[ED25519_BASE64_LEN + 1];
  ed25519_public_key_t blinded_pk;

  tor_assert(identity_pk);

  /* Get blinded pubkey of hidden service. It is possible that we just moved
   * to a new time period meaning that we won't be able to purge the request
   * from the previous time period. That is fine because they will expire at
   * some point and we don't care about those anymore. */
  hs_build_blinded_pubkey(identity_pk, NULL, 0,
                          hs_get_time_period_num(0), &blinded_pk);
  if (BUG(ed25519_public_to_base64(base64_blinded_pk, &blinded_pk) < 0)) {
    return;
  }
  /* Purge last hidden service request from cache for this blinded key. */
  hs_purge_hid_serv_from_last_hid_serv_requests(base64_blinded_pk);
}

/* Return true iff there is at least one pending directory descriptor request
 * for the service identity_pk. */
static int
directory_request_is_pending(const ed25519_public_key_t *identity_pk)
{
  int ret = 0;
  smartlist_t *conns =
    connection_list_by_type_purpose(CONN_TYPE_DIR, DIR_PURPOSE_FETCH_HSDESC);

  SMARTLIST_FOREACH_BEGIN(conns, connection_t *, conn) {
    const hs_ident_dir_conn_t *ident = TO_DIR_CONN(conn)->hs_ident;
    if (BUG(ident == NULL)) {
      /* A directory connection fetching a service descriptor can't have an
       * empty hidden service identifier. */
      continue;
    }
    if (!ed25519_pubkey_eq(identity_pk, &ident->identity_pk)) {
      continue;
    }
    ret = 1;
    break;
  } SMARTLIST_FOREACH_END(conn);

  /* No ownership of the objects in this list. */
  smartlist_free(conns);
  return ret;
}

/* We failed to fetch a descriptor for the service with <b>identity_pk</b>
 * because of <b>status</b>. Find all pending SOCKS connections for this
 * service that are waiting on the descriptor and close them with
 * <b>reason</b>. */
static void
close_all_socks_conns_waiting_for_desc(const ed25519_public_key_t *identity_pk,
                                       hs_client_fetch_status_t status,
                                       int reason)
{
  unsigned int count = 0;
  time_t now = approx_time();
  smartlist_t *conns =
    connection_list_by_type_state(CONN_TYPE_AP, AP_CONN_STATE_RENDDESC_WAIT);

  SMARTLIST_FOREACH_BEGIN(conns, connection_t *, base_conn) {
    entry_connection_t *entry_conn = TO_ENTRY_CONN(base_conn);
    const edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(entry_conn);

    /* Only consider the entry connections that match the service for which
     * we tried to get the descriptor. */
    if (!edge_conn->hs_ident ||
        !ed25519_pubkey_eq(identity_pk,
                           &edge_conn->hs_ident->identity_pk)) {
      continue;
    }
    assert_connection_ok(base_conn, now);
    /* Unattach the entry connection, which will close it for the given
     * reason. */
    connection_mark_unattached_ap(entry_conn, reason);
    count++;
  } SMARTLIST_FOREACH_END(base_conn);

  if (count > 0) {
    char onion_address[HS_SERVICE_ADDR_LEN_BASE32 + 1];
    hs_build_address(identity_pk, HS_VERSION_THREE, onion_address);
    log_notice(LD_REND, "Closed %u streams for service %s.onion "
               "for reason %s. Fetch status: %s.",
               count, safe_str_client(onion_address),
               stream_end_reason_to_string(reason),
               fetch_status_to_string(status));
  }

  /* No ownership of the object(s) in this list. */
  smartlist_free(conns);
}

/* Find all pending SOCKS connections waiting for a descriptor and retry them
 * all. This is called when the directory information changed. */
static void
retry_all_socks_conn_waiting_for_desc(void)
{
  smartlist_t *conns =
    connection_list_by_type_state(CONN_TYPE_AP, AP_CONN_STATE_RENDDESC_WAIT);

  SMARTLIST_FOREACH_BEGIN(conns, connection_t *, base_conn) {
    hs_client_fetch_status_t status;
    const edge_connection_t *edge_conn =
      ENTRY_TO_EDGE_CONN(TO_ENTRY_CONN(base_conn));

    /* Ignore non HS or non v3 connection. */
    if (edge_conn->hs_ident == NULL) {
      continue;
    }
    /* In this loop, we will possibly try to fetch a descriptor for the
     * pending connections because we just got more directory information.
     * However, the refetch process can clean up all SOCKS requests to the
     * same service if an internal error happens. Thus, we can end up with
     * closed connections in our list. */
    if (base_conn->marked_for_close) {
      continue;
    }

    /* XXX: There is an optimization we could do which is that for a service
     * key, we could check if we can fetch and remember that decision. */

    /* Order a refetch in case it works this time. */
    status = hs_client_refetch_hsdesc(&edge_conn->hs_ident->identity_pk);
    if (BUG(status == HS_CLIENT_FETCH_HAVE_DESC)) {
      /* This case is unique because it can NOT happen in theory. Once we get
       * a new descriptor, the HS client subsystem is notified immediately and
       * the connections waiting for it are handled which means the state will
       * change from renddesc wait state. Log this and continue to next
       * connection. */
      continue;
    }
    /* In the case of an error, either all SOCKS connections have been
     * closed or we are still missing directory information. Leave the
     * connection in renddesc wait state so when we get more info, we'll be
     * able to try it again. */
  } SMARTLIST_FOREACH_END(base_conn);

  /* We don't have ownership of those objects. */
  smartlist_free(conns);
}

/* A v3 HS circuit successfully connected to the hidden service. Update the
 * stream state at <b>hs_conn_ident</b> appropriately. */
static void
note_connection_attempt_succeeded(const hs_ident_edge_conn_t *hs_conn_ident)
{
  tor_assert(hs_conn_ident);

  /* Remove from the hid serv cache all requests for that service so we can
   * query the HSDir again later on for various reasons. */
  purge_hid_serv_request(&hs_conn_ident->identity_pk);

  /* The v2 subsystem cleans up the intro point time out flag at this stage.
   * We don't try to do it here because we still need to keep intact the intro
   * point state for future connections. Even though we were able to connect
   * to the service, that doesn't mean we should reset the timed-out intro
   * points.
   *
   * It is not possible to have successfully connected to an intro point
   * present in our cache that was on error or timed out. Every entry in that
   * cache has a two-minute lifetime so ultimately the intro point(s) state
   * will be reset and it will be possible to retry them. */
}

/* Given the pubkey of a hidden service in <b>onion_identity_pk</b>, fetch its
 * descriptor by launching a dir connection to <b>hsdir</b>. Return a
 * hs_client_fetch_status_t status code depending on how it went. */
static hs_client_fetch_status_t
directory_launch_v3_desc_fetch(const ed25519_public_key_t *onion_identity_pk,
                               const routerstatus_t *hsdir)
{
  uint64_t current_time_period = hs_get_time_period_num(0);
  ed25519_public_key_t blinded_pubkey;
  char base64_blinded_pubkey[ED25519_BASE64_LEN + 1];
  hs_ident_dir_conn_t hs_conn_dir_ident;
  int retval;

  tor_assert(hsdir);
  tor_assert(onion_identity_pk);

  /* Get blinded pubkey */
  hs_build_blinded_pubkey(onion_identity_pk, NULL, 0,
                          current_time_period, &blinded_pubkey);
  /* ...and base64 it. */
  retval = ed25519_public_to_base64(base64_blinded_pubkey, &blinded_pubkey);
  if (BUG(retval < 0)) {
    return HS_CLIENT_FETCH_ERROR;
  }

  /* Copy onion pk to a dir_ident so that we attach it to the dir conn */
  hs_ident_dir_conn_init(onion_identity_pk, &blinded_pubkey,
                         &hs_conn_dir_ident);

  /* Setup directory request */
  directory_request_t *req =
    directory_request_new(DIR_PURPOSE_FETCH_HSDESC);
  directory_request_set_routerstatus(req, hsdir);
  directory_request_set_indirection(req, DIRIND_ANONYMOUS);
  directory_request_set_resource(req, base64_blinded_pubkey);
  directory_request_fetch_set_hs_ident(req, &hs_conn_dir_ident);
  directory_initiate_request(req);
  directory_request_free(req);

  log_info(LD_REND, "Descriptor fetch request for service %s with blinded "
           "key %s to directory %s",
           safe_str_client(ed25519_fmt(onion_identity_pk)),
           safe_str_client(base64_blinded_pubkey),
           safe_str_client(routerstatus_describe(hsdir)));

  /* Fire a REQUESTED event on the control port. */
  hs_control_desc_event_requested(onion_identity_pk, base64_blinded_pubkey,
                                  hsdir);

  /* Cleanup memory. */
  memwipe(&blinded_pubkey, 0, sizeof(blinded_pubkey));
  memwipe(base64_blinded_pubkey, 0, sizeof(base64_blinded_pubkey));
  memwipe(&hs_conn_dir_ident, 0, sizeof(hs_conn_dir_ident));

  return HS_CLIENT_FETCH_LAUNCHED;
}

/** Return the HSDir we should use to fetch the descriptor of the hidden
 * service with identity key <b>onion_identity_pk</b>. */
STATIC routerstatus_t *
pick_hsdir_v3(const ed25519_public_key_t *onion_identity_pk)
{
  int retval;
  char base64_blinded_pubkey[ED25519_BASE64_LEN + 1];
  uint64_t current_time_period = hs_get_time_period_num(0);
  smartlist_t *responsible_hsdirs;
  ed25519_public_key_t blinded_pubkey;
  routerstatus_t *hsdir_rs = NULL;

  tor_assert(onion_identity_pk);

  responsible_hsdirs = smartlist_new();

  /* Get blinded pubkey of hidden service */
  hs_build_blinded_pubkey(onion_identity_pk, NULL, 0,
                          current_time_period, &blinded_pubkey);
  /* ...and base64 it. */
  retval = ed25519_public_to_base64(base64_blinded_pubkey, &blinded_pubkey);
  if (BUG(retval < 0)) {
    return NULL;
  }

  /* Get responsible hsdirs of service for this time period */
  hs_get_responsible_hsdirs(&blinded_pubkey, current_time_period,
                            0, 1, responsible_hsdirs);

  log_debug(LD_REND, "Found %d responsible HSDirs and about to pick one.",
            smartlist_len(responsible_hsdirs));

  /* Pick an HSDir from the responsible ones. The ownership of
   * responsible_hsdirs is given to this function so no need to free it. */
  hsdir_rs = hs_pick_hsdir(responsible_hsdirs, base64_blinded_pubkey);

  return hsdir_rs;
}

/** Fetch a v3 descriptor using the given <b>onion_identity_pk</b>.
 *
 * On success, HS_CLIENT_FETCH_LAUNCHED is returned. Otherwise, an error from
 * hs_client_fetch_status_t is returned. */
MOCK_IMPL(STATIC hs_client_fetch_status_t,
fetch_v3_desc, (const ed25519_public_key_t *onion_identity_pk))
{
  routerstatus_t *hsdir_rs = NULL;

  tor_assert(onion_identity_pk);

  hsdir_rs = pick_hsdir_v3(onion_identity_pk);
  if (!hsdir_rs) {
    log_info(LD_REND, "Couldn't pick a v3 hsdir.");
    return HS_CLIENT_FETCH_NO_HSDIRS;
  }

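  /* We have a directory to ask, launch the descriptor fetch. */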
  return directory_launch_v3_desc_fetch(onion_identity_pk, hsdir_rs);
}

/* Make sure that the given v3 origin circuit circ is a valid and correct
 * introduction circuit. This will BUG() on any problems and hard assert if
 * the anonymity of the circuit is not ok. Return 0 on success else -1 in
 * which case the circuit should be marked for close immediately. */
static int
intro_circ_is_ok(const origin_circuit_t *circ)
{
  int ret = 0;

  tor_assert(circ);

  if (BUG(TO_CIRCUIT(circ)->purpose != CIRCUIT_PURPOSE_C_INTRODUCING &&
          TO_CIRCUIT(circ)->purpose != CIRCUIT_PURPOSE_C_INTRODUCE_ACK_WAIT &&
          TO_CIRCUIT(circ)->purpose != CIRCUIT_PURPOSE_C_INTRODUCE_ACKED)) {
    ret = -1;
  }
  if (BUG(circ->hs_ident == NULL)) {
    ret = -1;
  }
  if (BUG(!hs_ident_intro_circ_is_valid(circ->hs_ident))) {
    ret = -1;
  }

  /* This can stop the tor daemon but we want that since if we don't have
   * anonymity on this circuit, something went really wrong. */
  assert_circ_anonymity_ok(circ, get_options());
  return ret;
}

/* Find a descriptor intro point object that matches the given ident in the
 * given descriptor desc. Return NULL if not found. */
static const hs_desc_intro_point_t *
find_desc_intro_point_by_ident(const hs_ident_circuit_t *ident,
                               const hs_descriptor_t *desc)
{
  const hs_desc_intro_point_t *intro_point = NULL;

  tor_assert(ident);
  tor_assert(desc);

  SMARTLIST_FOREACH_BEGIN(desc->encrypted_data.intro_points,
                          const hs_desc_intro_point_t *, ip) {
    if (ed25519_pubkey_eq(&ident->intro_auth_pk,
                          &ip->auth_key_cert->signed_key)) {
      intro_point = ip;
      break;
    }
  } SMARTLIST_FOREACH_END(ip);

  return intro_point;
}

/* Find a descriptor intro point object from the descriptor object desc that
 * matches the given legacy identity digest in legacy_id. Return NULL if not
 * found. */
static hs_desc_intro_point_t *
find_desc_intro_point_by_legacy_id(const char *legacy_id,
                                   const hs_descriptor_t *desc)
{
  hs_desc_intro_point_t *ret_ip = NULL;

  tor_assert(legacy_id);
  tor_assert(desc);

  /* We will go over every intro point and try to find which one is linked to
   * that circuit. Those lists are small so it's not that expensive. */
  SMARTLIST_FOREACH_BEGIN(desc->encrypted_data.intro_points,
                          hs_desc_intro_point_t *, ip) {
    SMARTLIST_FOREACH_BEGIN(ip->link_specifiers,
                            const hs_desc_link_specifier_t *, lspec) {
      /* Not all tor nodes have an ed25519 identity key so we still rely on
       * the legacy identity digest. */
      if (lspec->type != LS_LEGACY_ID) {
        continue;
      }
      if (fast_memneq(legacy_id, lspec->u.legacy_id, DIGEST_LEN)) {
        break;
      }
      /* Found it. */
      ret_ip = ip;
      goto end;
    } SMARTLIST_FOREACH_END(lspec);
  } SMARTLIST_FOREACH_END(ip);

 end:
  return ret_ip;
}

/* Send an INTRODUCE1 cell along the intro circuit and populate the rend
 * circuit identifier with the needed key material for the e2e encryption.
 * Return 0 on success, -1 if there is a transient error such that an action
 * has been taken to recover and -2 if there is a permanent error indicating
 * that both circuits were closed. */
static int
send_introduce1(origin_circuit_t *intro_circ,
                origin_circuit_t *rend_circ)
{
  int status;
  char onion_address[HS_SERVICE_ADDR_LEN_BASE32 + 1];
  const ed25519_public_key_t *service_identity_pk = NULL;
  const hs_desc_intro_point_t *ip;

  tor_assert(rend_circ);
  if (intro_circ_is_ok(intro_circ) < 0) {
    goto perm_err;
  }

  service_identity_pk = &intro_circ->hs_ident->identity_pk;
  /* For logging purposes. There will be a time when the hs_ident will have a
   * version number but for now there is none because it's all v3. */
  hs_build_address(service_identity_pk, HS_VERSION_THREE, onion_address);

  log_info(LD_REND, "Sending INTRODUCE1 cell to service %s on circuit %u",
           safe_str_client(onion_address), TO_CIRCUIT(intro_circ)->n_circ_id);

  /* 1) Get descriptor from our cache. */
  const hs_descriptor_t *desc =
    hs_cache_lookup_as_client(service_identity_pk);
  if (desc == NULL || !hs_client_any_intro_points_usable(service_identity_pk,
                                                         desc)) {
    log_info(LD_REND, "Request to %s %s. Trying to fetch a new descriptor.",
             safe_str_client(onion_address),
             (desc) ? "didn't have usable intro points" :
             "didn't have a descriptor");
    hs_client_refetch_hsdesc(service_identity_pk);
    /* We just triggered a refetch, make sure every connection is back
     * waiting for that descriptor. */
    flag_all_conn_wait_desc(service_identity_pk);
    /* We just asked for a refetch so this is a transient error. */
    goto tran_err;
  }

  /* We need to find which intro point in the descriptor we are connected to
   * on intro_circ. */
  ip = find_desc_intro_point_by_ident(intro_circ->hs_ident, desc);
  if (BUG(ip == NULL)) {
    /* If we can find a descriptor from this introduction circuit ident, we
     * must have a valid intro point object. Permanent error. */
    goto perm_err;
  }

  /* Send the INTRODUCE1 cell. */
  if (hs_circ_send_introduce1(intro_circ, rend_circ, ip,
                              desc->subcredential) < 0) {
    /* Unable to send the cell, the intro circuit has been marked for close so
     * this is a permanent error. */
    tor_assert_nonfatal(TO_CIRCUIT(intro_circ)->marked_for_close);
    goto perm_err;
  }

  /* Cell has been sent successfully. Copy the introduction point
   * authentication and encryption key in the rendezvous circuit identifier so
   * we can compute the ntor keys when we receive the RENDEZVOUS2 cell. */
  memcpy(&rend_circ->hs_ident->intro_enc_pk, &ip->enc_key,
         sizeof(rend_circ->hs_ident->intro_enc_pk));
  ed25519_pubkey_copy(&rend_circ->hs_ident->intro_auth_pk,
                      &intro_circ->hs_ident->intro_auth_pk);

  /* Now, we wait for an ACK or NAK on this circuit. */
  circuit_change_purpose(TO_CIRCUIT(intro_circ),
                         CIRCUIT_PURPOSE_C_INTRODUCE_ACK_WAIT);
  /* Set timestamp_dirty, because circuit_expire_building expects it to
   * specify when a circuit entered the _C_INTRODUCE_ACK_WAIT state. */
  TO_CIRCUIT(intro_circ)->timestamp_dirty = time(NULL);
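  /* Count this circuit use attempt for path bias accounting. */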
  pathbias_count_use_attempt(intro_circ);

  /* Success. */
  status = 0;
  goto end;

 perm_err:
  /* Permanent error: it is possible that the intro circuit was closed
   * earlier because we weren't able to send the cell. Make sure we don't
   * double close it which would result in a warning. */
  if (!TO_CIRCUIT(intro_circ)->marked_for_close) {
    circuit_mark_for_close(TO_CIRCUIT(intro_circ), END_CIRC_REASON_INTERNAL);
  }
  circuit_mark_for_close(TO_CIRCUIT(rend_circ), END_CIRC_REASON_INTERNAL);
  status = -2;
  goto end;

 tran_err:
  status = -1;

 end:
  memwipe(onion_address, 0, sizeof(onion_address));
  return status;
}

/* Using the introduction circuit circ, setup the authentication key of the
 * intro point this circuit has extended to. */
static void
setup_intro_circ_auth_key(origin_circuit_t *circ)
{
  const hs_descriptor_t *desc;
  const hs_desc_intro_point_t *ip;

  tor_assert(circ);

  desc = hs_cache_lookup_as_client(&circ->hs_ident->identity_pk);
  if (BUG(desc == NULL)) {
    /* Opening intro circuit without the descriptor is no good... */
    goto end;
  }

  /* We will go over every intro point and try to find which one is linked to
   * that circuit. Those lists are small so it's not that expensive. */
  ip = find_desc_intro_point_by_legacy_id(
                       circ->build_state->chosen_exit->identity_digest, desc);
  if (ip) {
    /* We got it, copy its authentication key to the identifier. */
    ed25519_pubkey_copy(&circ->hs_ident->intro_auth_pk,
                        &ip->auth_key_cert->signed_key);
    goto end;
  }

  /* Reaching this point means we didn't find any intro point for this circuit
   * which is not supposed to happen. */
  tor_assert_nonfatal_unreached();

 end:
  return;
}

/* Called when an introduction circuit has opened. */
static void
client_intro_circ_has_opened(origin_circuit_t *circ)
{
  tor_assert(circ);
  tor_assert(TO_CIRCUIT(circ)->purpose == CIRCUIT_PURPOSE_C_INTRODUCING);
  log_info(LD_REND, "Introduction circuit %u has opened. Attaching streams.",
           (unsigned int) TO_CIRCUIT(circ)->n_circ_id);

  /* This is an introduction circuit so we'll attach the correct
   * authentication key to the circuit identifier so it can be identified
   * properly later on. */
  setup_intro_circ_auth_key(circ);

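  /* Attach any entry connections that were waiting for this circuit. */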
  connection_ap_attach_pending(1);
}

/* Called when a rendezvous circuit has opened. */
static void
client_rendezvous_circ_has_opened(origin_circuit_t *circ)
{
  tor_assert(circ);
  tor_assert(TO_CIRCUIT(circ)->purpose == CIRCUIT_PURPOSE_C_ESTABLISH_REND);

  const extend_info_t *rp_ei = circ->build_state->chosen_exit;

  /* Check that we didn't accidentally choose a node that does not understand
   * the v3 rendezvous protocol */
  if (rp_ei) {
    const node_t *rp_node = node_get_by_id(rp_ei->identity_digest);
    if (rp_node) {
      if (BUG(!node_supports_v3_rendezvous_point(rp_node))) {
        return;
      }
    }
  }

  log_info(LD_REND, "Rendezvous circuit has opened to %s.",
           safe_str_client(extend_info_describe(rp_ei)));

  /* Ignore returned value, nothing we can really do. On failure, the circuit
   * will be marked for close. */
  hs_circ_send_establish_rendezvous(circ);

  /* Register rend circuit in circuitmap if it's still alive. */
  if (!TO_CIRCUIT(circ)->marked_for_close) {
    hs_circuitmap_register_rend_circ_client_side(circ,
                                     circ->hs_ident->rendezvous_cookie);
  }
}

/* This is a helper function that converts a descriptor intro point object ip
 * to a newly allocated, fully initialized extend_info_t object. Return NULL
 * if we can't convert it, which most likely means missing or malformed link
 * specifiers. */
STATIC extend_info_t *
desc_intro_point_to_extend_info(const hs_desc_intro_point_t *ip)
{
  extend_info_t *ei;
  smartlist_t *lspecs = smartlist_new();

  tor_assert(ip);

  /* We first encode the descriptor link specifiers into the binary
   * representation which is a trunnel object. */
  SMARTLIST_FOREACH_BEGIN(ip->link_specifiers,
                          const hs_desc_link_specifier_t *, desc_lspec) {
    link_specifier_t *lspec = hs_desc_lspec_to_trunnel(desc_lspec);
    smartlist_add(lspecs, lspec);
  } SMARTLIST_FOREACH_END(desc_lspec);

  /* Explicitly put the direct connection option to 0 because this is client
   * side and there is no such thing as a non anonymous client. */
  ei = hs_get_extend_info_from_lspecs(lspecs, &ip->onion_key, 0);

  SMARTLIST_FOREACH(lspecs, link_specifier_t *, ls, link_specifier_free(ls));
  smartlist_free(lspecs);
  return ei;
}

/* Return true iff the intro point ip for the service service_pk is usable.
 * This function checks if the intro point is in the client intro state cache
 * and checks the recorded failures. It is considered usable if:
 * - No error happened (INTRO_POINT_FAILURE_GENERIC)
 * - It is not flagged as timed out (INTRO_POINT_FAILURE_TIMEOUT)
 * - The unreachable count is lower than
 *   MAX_INTRO_POINT_REACHABILITY_FAILURES (INTRO_POINT_FAILURE_UNREACHABLE)
 */
static int
intro_point_is_usable(const ed25519_public_key_t *service_pk,
                      const hs_desc_intro_point_t *ip)
{
  const hs_cache_intro_state_t *state;

  tor_assert(service_pk);
  tor_assert(ip);

  state = hs_cache_client_intro_state_find(service_pk,
                                           &ip->auth_key_cert->signed_key);
  if (state == NULL) {
    /* This means we've never encountered any problem thus usable. */
    goto usable;
  }
  if (state->error) {
    log_info(LD_REND, "Intro point with auth key %s had an error. Not usable",
             safe_str_client(ed25519_fmt(&ip->auth_key_cert->signed_key)));
    goto not_usable;
  }
  if (state->timed_out) {
    log_info(LD_REND, "Intro point with auth key %s timed out. Not usable",
             safe_str_client(ed25519_fmt(&ip->auth_key_cert->signed_key)));
    goto not_usable;
  }
  if (state->unreachable_count >= MAX_INTRO_POINT_REACHABILITY_FAILURES) {
    log_info(LD_REND, "Intro point with auth key %s unreachable. Not usable",
             safe_str_client(ed25519_fmt(&ip->auth_key_cert->signed_key)));
    goto not_usable;
  }

 usable:
  return 1;
 not_usable:
  return 0;
}

/* Using a descriptor desc, return a newly allocated extend_info_t object of a
 * randomly picked introduction point from its list. Return NULL if none are
 * usable. */
STATIC extend_info_t *
client_get_random_intro(const ed25519_public_key_t *service_pk)
{
  extend_info_t *ei = NULL, *ei_excluded = NULL;
  smartlist_t *usable_ips = NULL;
  const hs_descriptor_t *desc;
  const hs_desc_encrypted_data_t *enc_data;
  const or_options_t *options = get_options();
  /* Calculate the onion address for logging purposes */
  char onion_address[HS_SERVICE_ADDR_LEN_BASE32 + 1];

  tor_assert(service_pk);

  desc = hs_cache_lookup_as_client(service_pk);
  /* Assume the service is v3 if the descriptor is missing. This is ok,
   * because we only use the address in log messages */
  hs_build_address(service_pk,
                   desc ? desc->plaintext_data.version : HS_VERSION_THREE,
                   onion_address);
  if (desc == NULL || !hs_client_any_intro_points_usable(service_pk,
                                                         desc)) {
    log_info(LD_REND, "Unable to randomly select an introduction point "
             "for service %s because descriptor %s. We can't connect.",
             safe_str_client(onion_address),
             (desc) ? "doesn't have any usable intro points"
                    : "is missing (assuming v3 onion address)");
    goto end;
  }

  enc_data = &desc->encrypted_data;
  usable_ips = smartlist_new();
  smartlist_add_all(usable_ips, enc_data->intro_points);
  while (smartlist_len(usable_ips) != 0) {
    int idx;
    const hs_desc_intro_point_t *ip;

    /* Pick a random intro point and immediately remove it from the usable
     * list so we don't pick it again if we have to iterate more. */
    idx = crypto_rand_int(smartlist_len(usable_ips));
    ip = smartlist_get(usable_ips, idx);
    smartlist_del(usable_ips, idx);

    /* We need to make sure we have a usable intro point which is in a good
     * state in our cache. */
    if (!intro_point_is_usable(service_pk, ip)) {
      continue;
    }

    /* Generate an extend info object from the intro point object. */
    ei = desc_intro_point_to_extend_info(ip);
    if (ei == NULL) {
      /* We can get here for instance if the intro point is a private address
       * and we aren't allowed to extend to those. */
      log_info(LD_REND, "Unable to select introduction point with auth key %s "
               "for service %s, because we could not extend to it.",
               safe_str_client(ed25519_fmt(&ip->auth_key_cert->signed_key)),
               safe_str_client(onion_address));
      continue;
    }

    /* Test the pick against ExcludeNodes. */
    if (routerset_contains_extendinfo(options->ExcludeNodes, ei)) {
      /* If this pick is in the ExcludeNodes list, we keep its reference so if
       * we ever end up not being able to pick anything else and StrictNodes is
       * unset, we'll use it. */
      if (ei_excluded) {
        /* If something was already here free it. After the loop is gone we
         * will examine the last excluded intro point, and that's fine since
         * that's random anyway */
        extend_info_free(ei_excluded);
      }
      ei_excluded = ei;
      continue;
    }

    /* Good pick! Let's go with this. */
    goto end;
  }

  /* Reaching this point means a couple of things. Either we can't use any of
   * the intro points listed because they can't be extended to, or they are
   * listed in the ExcludeNodes list. In the latter case, if StrictNodes is
   * set, we are forced to not use anything. */
  ei = ei_excluded;
  if (options->StrictNodes) {
    log_warn(LD_REND, "Every introduction point for service %s is in the "
             "ExcludeNodes set and StrictNodes is set. We can't connect.",
             safe_str_client(onion_address));
    extend_info_free(ei);
    ei = NULL;
  } else {
    log_fn(LOG_PROTOCOL_WARN, LD_REND, "Every introduction point for service "
           "%s is unusable or we can't extend to it. We can't connect.",
           safe_str_client(onion_address));
  }

 end:
  smartlist_free(usable_ips);
  memwipe(onion_address, 0, sizeof(onion_address));
  return ei;
}

/* For this introduction circuit, check whether we have any usable
 * introduction point left for this service. If so, we'll use the circuit to
 * re-extend to a new intro point. Else, we'll close the circuit and its
 * corresponding rendezvous circuit. Return 0 if we are re-extending else -1
 * if we are closing the circuits.
 *
 * This is called when getting an INTRODUCE_ACK cell with a NACK. */
static int
close_or_reextend_intro_circ(origin_circuit_t *intro_circ)
{
  int ret = -1;
  const hs_descriptor_t *desc;
  origin_circuit_t *rend_circ;

  tor_assert(intro_circ);

  desc = hs_cache_lookup_as_client(&intro_circ->hs_ident->identity_pk);
  if (BUG(desc == NULL)) {
    /* We can't continue without a descriptor. */
    goto close;
  }
  /* We still have the descriptor, great! Let's try to see if we can
   * re-extend by looking up if there are any usable intro points. */
  if (!hs_client_any_intro_points_usable(&intro_circ->hs_ident->identity_pk,
                                         desc)) {
    goto close;
  }
  /* Try to re-extend now. */
  if (hs_client_reextend_intro_circuit(intro_circ) < 0) {
    goto close;
  }
  /* Success on re-extending. Don't return an error. */
  ret = 0;
  goto end;

 close:
  /* Change the intro circuit purpose beforehand so we don't report an intro
   * point failure again, triggering an extra descriptor fetch. The circuit
   * can already be closed on failure to re-extend. */
  if (!TO_CIRCUIT(intro_circ)->marked_for_close) {
    circuit_change_purpose(TO_CIRCUIT(intro_circ),
                           CIRCUIT_PURPOSE_C_INTRODUCE_ACKED);
    circuit_mark_for_close(TO_CIRCUIT(intro_circ), END_CIRC_REASON_FINISHED);
  }
  /* Close the related rendezvous circuit. */
  rend_circ = hs_circuitmap_get_rend_circ_client_side(
                                     intro_circ->hs_ident->rendezvous_cookie);
  /* The rendezvous circuit might have collapsed while the INTRODUCE_ACK was
   * inflight so we can't expect one every time. */
  if (rend_circ) {
    circuit_mark_for_close(TO_CIRCUIT(rend_circ), END_CIRC_REASON_FINISHED);
  }

 end:
  return ret;
}

/* Called when we get an INTRODUCE_ACK success status code. Do the appropriate
 * actions for the rendezvous point and finally close intro_circ. */
static void
handle_introduce_ack_success(origin_circuit_t *intro_circ)
{
  origin_circuit_t *rend_circ = NULL;

  tor_assert(intro_circ);

  log_info(LD_REND, "Received INTRODUCE_ACK ack! Informing rendezvous");

  /* Get the rendezvous circuit for this rendezvous cookie. */
  uint8_t *rendezvous_cookie = intro_circ->hs_ident->rendezvous_cookie;
  rend_circ =
    hs_circuitmap_get_established_rend_circ_client_side(rendezvous_cookie);
  if (rend_circ == NULL) {
    log_warn(LD_REND, "Can't find any rendezvous circuit. Stopping");
    goto end;
  }

  assert_circ_anonymity_ok(rend_circ, get_options());

  /* It is possible to get a RENDEZVOUS2 cell before the INTRODUCE_ACK which
   * means that the circuit will be joined and already transmitting data. In
   * that case, simply skip the purpose change and close the intro circuit
   * as we normally would. */
  if (TO_CIRCUIT(rend_circ)->purpose == CIRCUIT_PURPOSE_C_REND_JOINED) {
    goto end;
  }
  circuit_change_purpose(TO_CIRCUIT(rend_circ),
                         CIRCUIT_PURPOSE_C_REND_READY_INTRO_ACKED);
  /* Set timestamp_dirty, because circuit_expire_building expects it to
   * specify when a circuit entered the
   * CIRCUIT_PURPOSE_C_REND_READY_INTRO_ACKED state. */
  TO_CIRCUIT(rend_circ)->timestamp_dirty = time(NULL);

 end:
  /* We don't need the intro circuit anymore. It did what it had to do! */
  circuit_change_purpose(TO_CIRCUIT(intro_circ),
                         CIRCUIT_PURPOSE_C_INTRODUCE_ACKED);
  circuit_mark_for_close(TO_CIRCUIT(intro_circ), END_CIRC_REASON_FINISHED);

  /* XXX: Close pending intro circuits we might have in parallel. */
  return;
}

/* Called when we get an INTRODUCE_ACK failure status code. Depending on our
 * failure cache status, either close the circuit or re-extend to a new
 * introduction point. */
static void
handle_introduce_ack_bad(origin_circuit_t *circ, int status)
{
  tor_assert(circ);

  log_info(LD_REND, "Received INTRODUCE_ACK nack by %s. Reason: %u",
      safe_str_client(extend_info_describe(circ->build_state->chosen_exit)),
      status);

  /* It's a NAK. The introduction point didn't relay our request. */
  circuit_change_purpose(TO_CIRCUIT(circ), CIRCUIT_PURPOSE_C_INTRODUCING);

  /* Note down this failure in the intro point failure cache. Depending on how
   * many times we've tried this intro point, close it or reextend. */
  hs_cache_client_intro_state_note(&circ->hs_ident->identity_pk,
                                   &circ->hs_ident->intro_auth_pk,
                                   INTRO_POINT_FAILURE_GENERIC);
}

/* Called when we get an INTRODUCE_ACK on the intro circuit circ. The encoded
 * cell is in payload of length payload_len. Return 0 on success else a
 * negative value. The circuit is either closed or reused to re-extend to a
 * new introduction point. */
static int
handle_introduce_ack(origin_circuit_t *circ, const uint8_t *payload,
                     size_t payload_len)
{
  int status, ret = -1;

  tor_assert(circ);
  tor_assert(circ->build_state);
  tor_assert(circ->build_state->chosen_exit);
  assert_circ_anonymity_ok(circ, get_options());
  tor_assert(payload);

  status = hs_cell_parse_introduce_ack(payload, payload_len);
  switch (status) {
  case HS_CELL_INTRO_ACK_SUCCESS:
    ret = 0;
    handle_introduce_ack_success(circ);
    goto end;
  case HS_CELL_INTRO_ACK_FAILURE:
  case HS_CELL_INTRO_ACK_BADFMT:
  case HS_CELL_INTRO_ACK_NORELAY:
    handle_introduce_ack_bad(circ, status);
    /* We are going to see if we have to close the circuits (IP and RP) or we
     * can re-extend to a new intro point. */
    ret = close_or_reextend_intro_circ(circ);
    break;
  default:
    log_info(LD_PROTOCOL, "Unknown INTRODUCE_ACK status code %u from %s",
        status,
        safe_str_client(extend_info_describe(circ->build_state->chosen_exit)));
    break;
  }

 end:
  return ret;
}

/* Called when we get a RENDEZVOUS2 cell on the rendezvous circuit circ. The
 * encoded cell is in payload of length payload_len. Return 0 on success or a
 * negative value on error. On error, the circuit is marked for close. */
STATIC int
handle_rendezvous2(origin_circuit_t *circ, const uint8_t *payload,
                   size_t payload_len)
{
  int ret = -1;
  curve25519_public_key_t server_pk;
  uint8_t auth_mac[DIGEST256_LEN] = {0};
  uint8_t handshake_info[CURVE25519_PUBKEY_LEN + sizeof(auth_mac)] = {0};
  hs_ntor_rend_cell_keys_t keys;
  const hs_ident_circuit_t *ident;

  tor_assert(circ);
  tor_assert(payload);

  /* Make things easier. */
  ident = circ->hs_ident;
  tor_assert(ident);

  if (hs_cell_parse_rendezvous2(payload, payload_len, handshake_info,
                                sizeof(handshake_info)) < 0) {
    goto err;
  }
  /* Get from the handshake info the SERVER_PK and AUTH_MAC. */
  memcpy(&server_pk, handshake_info, CURVE25519_PUBKEY_LEN);
  memcpy(auth_mac, handshake_info + CURVE25519_PUBKEY_LEN, sizeof(auth_mac));

  /* Generate the handshake info. */
  if (hs_ntor_client_get_rendezvous1_keys(&ident->intro_auth_pk,
                                          &ident->rendezvous_client_kp,
                                          &ident->intro_enc_pk, &server_pk,
                                          &keys) < 0) {
    log_info(LD_REND, "Unable to compute the rendezvous keys.");
    goto err;
  }

  /* Critical check: make sure that the MAC we received matches what we
   * computed just above. */
  if (!hs_ntor_client_rendezvous2_mac_is_good(&keys, auth_mac)) {
    log_info(LD_REND, "Invalid MAC in RENDEZVOUS2. Rejecting cell.");
    goto err;
  }

  /* Setup the e2e encryption on the circuit and finalize its state. */
  if (hs_circuit_setup_e2e_rend_circ(circ, keys.ntor_key_seed,
                                     sizeof(keys.ntor_key_seed), 0) < 0) {
    log_info(LD_REND, "Unable to setup the e2e encryption.");
    goto err;
  }
  /* Success. Hidden service connection finalized! */
  ret = 0;
  goto end;

 err:
  circuit_mark_for_close(TO_CIRCUIT(circ), END_CIRC_REASON_TORPROTOCOL);
 end:
  memwipe(&keys, 0, sizeof(keys));
  return ret;
}

/* Return true iff the client can fetch a descriptor for this service public
 * identity key; in that case status_out, if not NULL, is untouched. If the
 * client can _not_ fetch the descriptor and status_out is not NULL, it is
 * set with the fetch status code. */
static unsigned int
can_client_refetch_desc(const ed25519_public_key_t *identity_pk,
                        hs_client_fetch_status_t *status_out)
{
  hs_client_fetch_status_t status;

  tor_assert(identity_pk);

  /* Are we configured to fetch descriptors? */
  if (!get_options()->FetchHidServDescriptors) {
    log_warn(LD_REND, "We received an onion address for a hidden service "
             "descriptor but we are configured to not fetch.");
    status = HS_CLIENT_FETCH_NOT_ALLOWED;
    goto cannot;
  }

  /* Without a live consensus we can't do any client actions. It is needed to
   * compute the hashring for a service. */
  if (!networkstatus_get_live_consensus(approx_time())) {
    log_info(LD_REND, "Can't fetch descriptor for service %s because we "
             "are missing a live consensus. Stalling connection.",
             safe_str_client(ed25519_fmt(identity_pk)));
    status = HS_CLIENT_FETCH_MISSING_INFO;
    goto cannot;
  }

  if (!router_have_minimum_dir_info()) {
    log_info(LD_REND, "Can't fetch descriptor for service %s because we "
             "don't have enough descriptors. Stalling connection.",
             safe_str_client(ed25519_fmt(identity_pk)));
    status = HS_CLIENT_FETCH_MISSING_INFO;
    goto cannot;
  }

  /* Check if fetching a desc for this HS is useful to us right now */
  {
    const hs_descriptor_t *cached_desc = NULL;
    cached_desc = hs_cache_lookup_as_client(identity_pk);
    if (cached_desc && hs_client_any_intro_points_usable(identity_pk,
                                                         cached_desc)) {
      log_info(LD_GENERAL, "We would fetch a v3 hidden service descriptor "
               "but we already have a usable descriptor.");
      status = HS_CLIENT_FETCH_HAVE_DESC;
      goto cannot;
    }
  }

  /* Don't try to refetch while we have a pending request for it. */
  if (directory_request_is_pending(identity_pk)) {
    log_info(LD_REND, "Already a pending directory request. Waiting on it.");
    status = HS_CLIENT_FETCH_PENDING;
    goto cannot;
  }

  /* Yes, client can fetch! */
  return 1;
 cannot:
  if (status_out) {
    *status_out = status;
  }
  return 0;
}

/* ========== */
/* Public API */
/* ========== */

/** A circuit just finished connecting to a hidden service that the stream
 * <b>conn</b> has been waiting for. Let the HS subsystem know about this. */
void
hs_client_note_connection_attempt_succeeded(const edge_connection_t *conn)
{
  tor_assert(connection_edge_is_rendezvous_stream(conn));

  if (BUG(conn->rend_data && conn->hs_ident)) {
    log_warn(LD_BUG, "Stream had both rend_data and hs_ident..."
             "Prioritizing hs_ident");
  }

  if (conn->hs_ident) { /* It's v3: pass it to the prop224 handler */
    note_connection_attempt_succeeded(conn->hs_ident);
    return;
  } else if (conn->rend_data) { /* It's v2: pass it to the legacy handler */
    rend_client_note_connection_attempt_ended(conn->rend_data);
    return;
  }
}

/* With the given encoded descriptor in desc_str and the service key in
 * service_identity_pk, decode the descriptor and set the desc pointer with a
 * newly allocated descriptor object.
 *
 * Return 0 on success else a negative value and desc is set to NULL. */
int
hs_client_decode_descriptor(const char *desc_str,
                            const ed25519_public_key_t *service_identity_pk,
                            hs_descriptor_t **desc)
{
  int ret;
  uint8_t subcredential[DIGEST256_LEN];
  ed25519_public_key_t blinded_pubkey;

  tor_assert(desc_str);
  tor_assert(service_identity_pk);
  tor_assert(desc);

  /* Create subcredential for this HS so that we can decrypt */
  {
    uint64_t current_time_period = hs_get_time_period_num(0);
    hs_build_blinded_pubkey(service_identity_pk, NULL, 0, current_time_period,
                            &blinded_pubkey);
    hs_get_subcredential(service_identity_pk, &blinded_pubkey, subcredential);
  }

  /* Parse descriptor */
  ret = hs_desc_decode_descriptor(desc_str, subcredential, desc);
  memwipe(subcredential, 0, sizeof(subcredential));
  if (ret < 0) {
    log_warn(LD_GENERAL, "Could not parse received descriptor as client.");
    if (get_options()->SafeLogging_ == SAFELOG_SCRUB_NONE) {
      log_warn(LD_GENERAL, "%s", escaped(desc_str));
    }
    goto err;
  }

  /* Make sure the descriptor signing key cross certifies with the computed
   * blinded key. Without this validation, anyone knowing the subcredential
   * and onion address can forge a descriptor. */
  tor_cert_t *cert = (*desc)->plaintext_data.signing_key_cert;
  if (tor_cert_checksig(cert,
                        &blinded_pubkey, approx_time()) < 0) {
    log_warn(LD_GENERAL, "Descriptor signing key certificate signature "
             "doesn't validate with computed blinded key: %s",
             tor_cert_describe_signature_status(cert));
    goto err;
  }

  return 0;
 err:
  return -1;
}

/* Return true iff there is at least one usable intro point in the service
 * descriptor desc. */
int
hs_client_any_intro_points_usable(const ed25519_public_key_t *service_pk,
                                  const hs_descriptor_t *desc)
{
  tor_assert(service_pk);
  tor_assert(desc);

  SMARTLIST_FOREACH_BEGIN(desc->encrypted_data.intro_points,
                          const hs_desc_intro_point_t *, ip) {
    if (intro_point_is_usable(service_pk, ip)) {
      goto usable;
    }
  } SMARTLIST_FOREACH_END(ip);

  return 0;
 usable:
  return 1;
}

/** Launch a connection to a hidden service directory to fetch a hidden
|
|
|
|
* service descriptor using <b>identity_pk</b> to get the necessary keys.
|
|
|
|
*
|
2017-09-12 18:24:26 +02:00
|
|
|
* A hs_client_fetch_status_t code is returned. */
|
2017-06-01 13:12:33 +02:00
|
|
|
int
|
|
|
|
hs_client_refetch_hsdesc(const ed25519_public_key_t *identity_pk)
|
|
|
|
{
|
2017-10-04 21:49:25 +02:00
|
|
|
hs_client_fetch_status_t status;
|
2017-09-26 16:45:24 +02:00
|
|
|
|
2017-06-01 13:12:33 +02:00
|
|
|
tor_assert(identity_pk);
|
|
|
|
|
2017-10-04 21:49:25 +02:00
|
|
|
if (!can_client_refetch_desc(identity_pk, &status)) {
|
|
|
|
return status;
|
2017-09-12 18:24:26 +02:00
|
|
|
}
|
|
|
|
|
2017-10-04 21:49:25 +02:00
|
|
|
/* Try to fetch the desc and if we encounter an unrecoverable error, mark
|
|
|
|
* the desc as unavailable for now. */
|
|
|
|
status = fetch_v3_desc(identity_pk);
|
|
|
|
if (fetch_status_should_close_socks(status)) {
|
|
|
|
close_all_socks_conns_waiting_for_desc(identity_pk, status,
|
2017-09-26 16:45:24 +02:00
|
|
|
END_STREAM_REASON_RESOLVEFAILED);
|
|
|
|
/* Remove HSDir fetch attempts so that we can retry later if the user
|
|
|
|
* wants us to regardless of if we closed any connections. */
|
|
|
|
purge_hid_serv_request(identity_pk);
|
|
|
|
}
|
2017-10-04 21:49:25 +02:00
|
|
|
return status;
|
2017-06-01 13:12:33 +02:00
|
|
|
}
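
/* Illustrative sketch (hypothetical helper, not part of this module): a
 * caller that triggers a descriptor refetch and logs the resulting
 * hs_client_fetch_status_t code. Kept out of the build. */
#if 0
static void
example_refetch_and_log(const ed25519_public_key_t *identity_pk)
{
  hs_client_fetch_status_t status =
    (hs_client_fetch_status_t) hs_client_refetch_hsdesc(identity_pk);
  log_info(LD_REND, "Descriptor fetch attempt: %s",
           fetch_status_to_string(status));
}
#endif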

/* This is called when we are trying to attach an AP connection to these
 * hidden service circuits from connection_ap_handshake_attach_circuit().
 * Return 0 on success, -1 for a transient error where recovery actions were
 * triggered, or -2 for a permanent error where both circuits will be marked
 * for close.
 *
 * The following supports every hidden service version. */
int
hs_client_send_introduce1(origin_circuit_t *intro_circ,
                          origin_circuit_t *rend_circ)
{
  return (intro_circ->hs_ident) ? send_introduce1(intro_circ, rend_circ) :
                                  rend_client_send_introduction(intro_circ,
                                                                rend_circ);
}

/* Called when the client circuit circ has been established. It can be either
 * an introduction or rendezvous circuit. This function handles all hidden
 * service versions. */
void
hs_client_circuit_has_opened(origin_circuit_t *circ)
{
  tor_assert(circ);

  /* Handle both versions. v2 uses rend_data and v3 uses the hs circuit
   * identifier hs_ident. Can't be both. */
  switch (TO_CIRCUIT(circ)->purpose) {
  case CIRCUIT_PURPOSE_C_INTRODUCING:
    if (circ->hs_ident) {
      client_intro_circ_has_opened(circ);
    } else {
      rend_client_introcirc_has_opened(circ);
    }
    break;
  case CIRCUIT_PURPOSE_C_ESTABLISH_REND:
    if (circ->hs_ident) {
      client_rendezvous_circ_has_opened(circ);
    } else {
      rend_client_rendcirc_has_opened(circ);
    }
    break;
  default:
    tor_assert_nonfatal_unreached();
  }
}

/* Called when we receive a RENDEZVOUS_ESTABLISHED cell. Change the state of
 * the circuit to CIRCUIT_PURPOSE_C_REND_READY. Return 0 on success; else
 * return a negative value and mark the circuit for close. */
int
hs_client_receive_rendezvous_acked(origin_circuit_t *circ,
                                   const uint8_t *payload, size_t payload_len)
{
  tor_assert(circ);
  tor_assert(payload);

  (void) payload_len;

  if (TO_CIRCUIT(circ)->purpose != CIRCUIT_PURPOSE_C_ESTABLISH_REND) {
    log_warn(LD_PROTOCOL, "Got a RENDEZVOUS_ESTABLISHED but we were not "
                          "expecting one. Closing circuit.");
    goto err;
  }

  log_info(LD_REND, "Received a RENDEZVOUS_ESTABLISHED. This circuit is "
                    "now ready for rendezvous.");
  circuit_change_purpose(TO_CIRCUIT(circ), CIRCUIT_PURPOSE_C_REND_READY);

  /* Set timestamp_dirty, because circuit_expire_building expects it to
   * specify when a circuit entered the _C_REND_READY state. */
  TO_CIRCUIT(circ)->timestamp_dirty = time(NULL);

  /* From a path bias point of view, this circuit is now successfully used.
   * Waiting any longer opens us up to attacks from malicious hidden services.
   * They could induce the client to attempt to connect to their hidden
   * service and never reply to the client's rend requests. */
  pathbias_mark_use_success(circ);

  /* If we already have the introduction circuit built, make sure we send
   * the INTRODUCE cell _now_ */
  connection_ap_attach_pending(1);

  return 0;
 err:
  circuit_mark_for_close(TO_CIRCUIT(circ), END_CIRC_REASON_TORPROTOCOL);
  return -1;
}

/* This is called when a descriptor has arrived following a fetch request and
 * has been stored in the client cache. Every entry connection that matches
 * the service identity key in the ident will get attached to the hidden
 * service circuit. */
void
hs_client_desc_has_arrived(const hs_ident_dir_conn_t *ident)
{
  time_t now = time(NULL);
  smartlist_t *conns = NULL;

  tor_assert(ident);

  conns = connection_list_by_type_state(CONN_TYPE_AP,
                                        AP_CONN_STATE_RENDDESC_WAIT);
  SMARTLIST_FOREACH_BEGIN(conns, connection_t *, base_conn) {
    const hs_descriptor_t *desc;
    entry_connection_t *entry_conn = TO_ENTRY_CONN(base_conn);
    const edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(entry_conn);

    /* Only consider the entry connections that match the service for which
     * we just fetched its descriptor. */
    if (!edge_conn->hs_ident ||
        !ed25519_pubkey_eq(&ident->identity_pk,
                           &edge_conn->hs_ident->identity_pk)) {
      continue;
    }
    assert_connection_ok(base_conn, now);

    /* We were just called because we stored the descriptor for this service
     * so not finding a descriptor means we have a bigger problem. */
    desc = hs_cache_lookup_as_client(&ident->identity_pk);
    if (BUG(desc == NULL)) {
      goto end;
    }

    if (!hs_client_any_intro_points_usable(&ident->identity_pk, desc)) {
      log_info(LD_REND, "Hidden service descriptor is unusable. "
                        "Closing streams.");
      connection_mark_unattached_ap(entry_conn,
                                    END_STREAM_REASON_RESOLVEFAILED);
      /* We are unable to use the descriptor so remove the directory request
       * from the cache so the next connection can try again. */
      note_connection_attempt_succeeded(edge_conn->hs_ident);
      goto end;
    }

    log_info(LD_REND, "Descriptor has arrived. Launching circuits.");

    /* Because the connection can now proceed to opening circuits and
     * ultimately connect to the service, reset these timestamps so the
     * connection is considered "fresh" and can continue without being closed
     * too early. */
    base_conn->timestamp_created = now;
    base_conn->timestamp_lastread = now;
    base_conn->timestamp_lastwritten = now;
    /* Change connection's state into waiting for a circuit. */
    base_conn->state = AP_CONN_STATE_CIRCUIT_WAIT;

    connection_ap_mark_as_pending_circuit(entry_conn);
  } SMARTLIST_FOREACH_END(base_conn);

 end:
  /* We don't have ownership of the objects in this list. */
  smartlist_free(conns);
}

/* Return a newly allocated extend_info_t for a randomly chosen introduction
 * point for the given edge connection edge_conn. Return NULL if we can't
 * pick any usable introduction points. */
extend_info_t *
hs_client_get_random_intro_from_edge(const edge_connection_t *edge_conn)
{
  tor_assert(edge_conn);

  return (edge_conn->hs_ident) ?
         client_get_random_intro(&edge_conn->hs_ident->identity_pk) :
         rend_client_get_random_intro(edge_conn->rend_data);
}

/* Called when we get an INTRODUCE_ACK cell on the introduction circuit circ.
 * Return 0 on success, else a negative value. The circuit will be closed or
 * reused to extend again to another intro point. */
int
hs_client_receive_introduce_ack(origin_circuit_t *circ,
                                const uint8_t *payload, size_t payload_len)
{
  int ret = -1;

  tor_assert(circ);
  tor_assert(payload);

  if (TO_CIRCUIT(circ)->purpose != CIRCUIT_PURPOSE_C_INTRODUCE_ACK_WAIT) {
    log_warn(LD_PROTOCOL, "Unexpected INTRODUCE_ACK on circuit %u.",
             (unsigned int) TO_CIRCUIT(circ)->n_circ_id);
    circuit_mark_for_close(TO_CIRCUIT(circ), END_CIRC_REASON_TORPROTOCOL);
    goto end;
  }

  ret = (circ->hs_ident) ? handle_introduce_ack(circ, payload, payload_len) :
                           rend_client_introduction_acked(circ, payload,
                                                          payload_len);
  /* For path bias: This circuit was used successfully. NACK or ACK counts. */
  pathbias_mark_use_success(circ);

 end:
  return ret;
}

/* Called when we get a RENDEZVOUS2 cell on the rendezvous circuit circ.
 * Return 0 on success, else a negative value. The circuit will be closed
 * on error. */
int
hs_client_receive_rendezvous2(origin_circuit_t *circ,
                              const uint8_t *payload, size_t payload_len)
{
  int ret = -1;

  tor_assert(circ);
  tor_assert(payload);

  /* Circuit can possibly be in either state because we could receive a
   * RENDEZVOUS2 cell before the INTRODUCE_ACK has been received. */
  if (TO_CIRCUIT(circ)->purpose != CIRCUIT_PURPOSE_C_REND_READY &&
      TO_CIRCUIT(circ)->purpose != CIRCUIT_PURPOSE_C_REND_READY_INTRO_ACKED) {
    log_warn(LD_PROTOCOL, "Unexpected RENDEZVOUS2 cell on circuit %u. "
                          "Closing circuit.",
             (unsigned int) TO_CIRCUIT(circ)->n_circ_id);
    circuit_mark_for_close(TO_CIRCUIT(circ), END_CIRC_REASON_TORPROTOCOL);
    goto end;
  }

  log_info(LD_REND, "Got RENDEZVOUS2 cell from hidden service on circuit %u.",
           TO_CIRCUIT(circ)->n_circ_id);

  ret = (circ->hs_ident) ? handle_rendezvous2(circ, payload, payload_len) :
                           rend_client_receive_rendezvous(circ, payload,
                                                          payload_len);

 end:
  return ret;
}

/* Extend the introduction circuit circ to another valid introduction point
 * for the hidden service it is trying to connect to, or mark it for close
 * and let a new circuit be launched if we can't extend it. Return 0 on
 * success or possible success. Return -1 and mark the introduction circuit
 * for close on permanent failure.
 *
 * On failure, the caller is responsible for marking the associated rendezvous
 * circuit for close. */
int
hs_client_reextend_intro_circuit(origin_circuit_t *circ)
{
  int ret = -1;
  extend_info_t *ei;

  tor_assert(circ);

  ei = (circ->hs_ident) ?
       client_get_random_intro(&circ->hs_ident->identity_pk) :
       rend_client_get_random_intro(circ->rend_data);
  if (ei == NULL) {
    log_warn(LD_REND, "No usable introduction points left. Closing.");
    circuit_mark_for_close(TO_CIRCUIT(circ), END_CIRC_REASON_INTERNAL);
    goto end;
  }

  if (circ->remaining_relay_early_cells) {
    log_info(LD_REND, "Re-extending circ %u, this time to %s.",
             (unsigned int) TO_CIRCUIT(circ)->n_circ_id,
             safe_str_client(extend_info_describe(ei)));
    ret = circuit_extend_to_new_exit(circ, ei);
    if (ret == 0) {
      /* We were able to extend so update the timestamp so we avoid expiring
       * this circuit too early. The intro circuit is short-lived so the
       * linkability issue is minimized; we just need the circuit to hold a
       * bit longer so we can introduce. */
      TO_CIRCUIT(circ)->timestamp_dirty = time(NULL);
    }
  } else {
    log_info(LD_REND, "Closing intro circ %u (out of RELAY_EARLY cells).",
             (unsigned int) TO_CIRCUIT(circ)->n_circ_id);
    circuit_mark_for_close(TO_CIRCUIT(circ), END_CIRC_REASON_FINISHED);
    /* connection_ap_handshake_attach_circuit will launch a new intro circ. */
    ret = 0;
  }

 end:
  extend_info_free(ei);
  return ret;
}

/* Release all the storage held by the client subsystem. */
void
hs_client_free_all(void)
{
  /* Purge the hidden service request cache. */
  hs_purge_last_hid_serv_requests();
}

/* Purge all potentially remotely-detectable state held in the hidden
 * service client code. Called on SIGNAL NEWNYM. */
void
hs_client_purge_state(void)
{
  /* v2 subsystem. */
  rend_client_purge_state();

  /* Cancel all descriptor fetches. Do this first so once done we are sure
   * that our descriptor cache won't be modified. */
  cancel_descriptor_fetches();
  /* Purge the introduction point state cache. */
  hs_cache_client_intro_state_purge();
  /* Purge the descriptor cache. */
  hs_cache_purge_as_client();
  /* Purge the last hidden service request cache. */
  hs_purge_last_hid_serv_requests();

  log_info(LD_REND, "Hidden service client state has been purged.");
}
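
/* Illustrative sketch (hypothetical handler, not part of this module): how a
 * SIGNAL NEWNYM handler outside this file would be expected to drive the
 * purge above. Kept out of the build. */
#if 0
static void
example_handle_newnym(void)
{
  /* Drop all client-side hidden service state that a remote party could use
   * to correlate activity across the identity change. */
  hs_client_purge_state();
}
#endif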

/* Called when our directory information has changed. */
void
hs_client_dir_info_changed(void)
{
  /* We have possibly reached the minimum directory information or a new
   * consensus, so retry all pending SOCKS connections in
   * AP_CONN_STATE_RENDDESC_WAIT state in order to fetch the descriptor. */
  retry_all_socks_conn_waiting_for_desc();
}