/* Copyright (c) 2001-2004, Roger Dingledine.
 * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
 * Copyright (c) 2007-2017, The Tor Project, Inc. */
/* See LICENSE for licensing information */

#define DIRECTORY_PRIVATE

#include "or.h"
#include "backtrace.h"
#include "bridges.h"
#include "buffers.h"
#include "circuitbuild.h"
#include "config.h"
#include "connection.h"
#include "connection_edge.h"
#include "conscache.h"
#include "consdiff.h"
#include "consdiffmgr.h"
#include "control.h"
#include "compat.h"
#include "directory.h"
#include "dirserv.h"
#include "dirvote.h"
#include "entrynodes.h"
#include "geoip.h"
#include "hs_cache.h"
#include "hs_common.h"
#include "hs_control.h"
#include "hs_client.h"
#include "main.h"
#include "microdesc.h"
#include "networkstatus.h"
#include "nodelist.h"
#include "policies.h"
#include "relay.h"
#include "rendclient.h"
#include "rendcommon.h"
#include "rendservice.h"
#include "rephist.h"
#include "router.h"
#include "routerlist.h"
#include "routerparse.h"
#include "routerset.h"
#include "shared_random.h"

#if defined(EXPORTMALLINFO) && defined(HAVE_MALLOC_H) && defined(HAVE_MALLINFO)
#if !defined(OpenBSD)
#include <malloc.h>
#endif
#endif

/**
 * \file directory.c
 * \brief Code to send and fetch information from directory authorities and
 *   caches via HTTP.
 *
 * Directory caches and authorities use dirserv.c to generate the results of a
 * query and stream them to the connection; clients use routerparse.c to parse
 * them.
 *
 * Every directory request has a dir_connection_t on the client side and on
 * the server side.  In most cases, the dir_connection_t object is a linked
 * connection, tunneled through an edge_connection_t so that it can be a
 * stream on the Tor network.  The only non-tunneled connections are those
 * that are used to upload material (descriptors and votes) to authorities.
 * Among tunneled connections, some use one-hop circuits, and others use
 * multi-hop circuits for anonymity.
 *
 * Directory requests are launched by calling directory_initiate_request().
 * This launches the connection, constructs an HTTP request with
 * directory_send_command(), sends the request, and waits for a response.
 * The client later handles the response with
 * connection_dir_client_reached_eof(), which passes the information received
 * to another part of Tor.
 *
 * On the server side, requests are read in directory_handle_command(),
 * which dispatches first on the request type (GET or POST), and then on
 * the URL requested.  GET requests are processed with a table-based
 * dispatcher in url_table[].  The process of handling larger GET requests
 * is complicated because we need to avoid allocating a copy of all the
 * data to be sent to the client in one huge buffer.  Instead, we spool the
 * data into the buffer using logic in connection_dirserv_flushed_some() in
 * dirserv.c.  (TODO: If we extended buf.c to have a zero-copy
 * reference-based buffer type, we could remove most of that code, at the
 * cost of a bit more reference counting.)
 **/

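/* Editorial sketch (not part of the original file): the client-side flow
 * described above, expressed with the directory_request_t helpers used
 * later in this file.  Here "rs" stands for the routerstatus_t of some
 * reachable directory cache; that variable is an assumption made purely
 * for illustration:
 *
 *   directory_request_t *req =
 *     directory_request_new(DIR_PURPOSE_FETCH_CONSENSUS);
 *   directory_request_set_routerstatus(req, rs);
 *   directory_request_set_indirection(req, DIRIND_ONEHOP);
 *   directory_request_set_resource(req, "microdesc");
 *   directory_initiate_request(req);
 *   directory_request_free(req);
 *
 * directory_send_command() then builds the HTTP request, and the reply is
 * handled by connection_dir_client_reached_eof() as noted above.
 */
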
/* In-points to directory.c:
 *
 * - directory_post_to_dirservers(), called from
 *   router_upload_dir_desc_to_dirservers() in router.c
 *   upload_service_descriptor() in rendservice.c
 * - directory_get_from_dirserver(), called from
 *   rend_client_refetch_renddesc() in rendclient.c
 *   run_scheduled_events() in main.c
 *   do_hup() in main.c
 * - connection_dir_process_inbuf(), called from
 *   connection_process_inbuf() in connection.c
 * - connection_dir_finished_flushing(), called from
 *   connection_finished_flushing() in connection.c
 * - connection_dir_finished_connecting(), called from
 *   connection_finished_connecting() in connection.c
 */
static void directory_send_command(dir_connection_t *conn,
                                   int direct,
                                   const directory_request_t *request);
static int body_is_plausible(const char *body, size_t body_len, int purpose);
static void http_set_address_origin(const char *headers, connection_t *conn);
static void connection_dir_download_routerdesc_failed(dir_connection_t *conn);
static void connection_dir_bridge_routerdesc_failed(dir_connection_t *conn);
static void connection_dir_download_cert_failed(
                               dir_connection_t *conn, int status_code);
static void connection_dir_retry_bridges(smartlist_t *descs);
static void dir_routerdesc_download_failed(smartlist_t *failed,
                                           int status_code,
                                           int router_purpose,
                                           int was_extrainfo,
                                           int was_descriptor_digests);
static void dir_microdesc_download_failed(smartlist_t *failed,
                                          int status_code,
                                          const char *dir_id);
static int client_likes_consensus(const struct consensus_cache_entry_t *ent,
                                  const char *want_url);

static void connection_dir_close_consensus_fetches(
                   dir_connection_t *except_this_one, const char *resource);

/********* START VARIABLES **********/

/** How far in the future do we allow a directory server to tell us it is
 * before deciding that one of us has the wrong time? */
#define ALLOW_DIRECTORY_TIME_SKEW (30*60)

#define X_ADDRESS_HEADER "X-Your-Address-Is: "
#define X_OR_DIFF_FROM_CONSENSUS_HEADER "X-Or-Diff-From-Consensus: "

/** HTTP cache control: how long do we tell proxies they can cache each
 * kind of document we serve? */
#define FULL_DIR_CACHE_LIFETIME (60*60)
#define RUNNINGROUTERS_CACHE_LIFETIME (20*60)
#define DIRPORTFRONTPAGE_CACHE_LIFETIME (20*60)
#define NETWORKSTATUS_CACHE_LIFETIME (5*60)
#define ROUTERDESC_CACHE_LIFETIME (30*60)
#define ROUTERDESC_BY_DIGEST_CACHE_LIFETIME (48*60*60)
#define ROBOTS_CACHE_LIFETIME (24*60*60)
#define MICRODESC_CACHE_LIFETIME (48*60*60)

/********* END VARIABLES ************/

/** Return false if the directory purpose <b>dir_purpose</b>
 * does not require an anonymous (three-hop) connection.
 *
 * Return true 1) by default, 2) if all directory actions have
 * specifically been configured to be over an anonymous connection,
 * or 3) if the router is a bridge */
int
purpose_needs_anonymity(uint8_t dir_purpose, uint8_t router_purpose,
                        const char *resource)
{
  if (get_options()->AllDirActionsPrivate)
    return 1;

  if (router_purpose == ROUTER_PURPOSE_BRIDGE) {
    if (dir_purpose == DIR_PURPOSE_FETCH_SERVERDESC
        && resource && !strcmp(resource, "authority.z")) {
      /* We are asking a bridge for its own descriptor. That doesn't need
         anonymity. */
      return 0;
    }
    /* Assume all other bridge stuff needs anonymity. */
    return 1; /* if no circuits yet, this might break bootstrapping, but it's
               * needed to be safe. */
  }

  switch (dir_purpose)
    {
    case DIR_PURPOSE_UPLOAD_DIR:
    case DIR_PURPOSE_UPLOAD_VOTE:
    case DIR_PURPOSE_UPLOAD_SIGNATURES:
    case DIR_PURPOSE_FETCH_STATUS_VOTE:
    case DIR_PURPOSE_FETCH_DETACHED_SIGNATURES:
    case DIR_PURPOSE_FETCH_CONSENSUS:
    case DIR_PURPOSE_FETCH_CERTIFICATE:
    case DIR_PURPOSE_FETCH_SERVERDESC:
    case DIR_PURPOSE_FETCH_EXTRAINFO:
    case DIR_PURPOSE_FETCH_MICRODESC:
      return 0;
    case DIR_PURPOSE_HAS_FETCHED_HSDESC:
    case DIR_PURPOSE_HAS_FETCHED_RENDDESC_V2:
    case DIR_PURPOSE_UPLOAD_RENDDESC_V2:
    case DIR_PURPOSE_FETCH_RENDDESC_V2:
    case DIR_PURPOSE_FETCH_HSDESC:
    case DIR_PURPOSE_UPLOAD_HSDESC:
      return 1;
    case DIR_PURPOSE_SERVER:
    default:
      log_warn(LD_BUG, "Called with dir_purpose=%d, router_purpose=%d",
               dir_purpose, router_purpose);
      tor_assert_nonfatal_unreached();
      return 1; /* Assume it needs anonymity; better safe than sorry. */
    }
}
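
/* Editorial example (not in the original source), assuming
 * AllDirActionsPrivate is unset:
 *
 *   purpose_needs_anonymity(DIR_PURPOSE_FETCH_CONSENSUS,
 *                           ROUTER_PURPOSE_GENERAL, "microdesc")  => 0
 *   purpose_needs_anonymity(DIR_PURPOSE_FETCH_SERVERDESC,
 *                           ROUTER_PURPOSE_BRIDGE, "authority.z") => 0
 *   purpose_needs_anonymity(DIR_PURPOSE_UPLOAD_HSDESC,
 *                           ROUTER_PURPOSE_GENERAL, NULL)         => 1
 *
 * (ROUTER_PURPOSE_GENERAL is assumed here as the "general" descriptor
 * purpose mentioned in the comments below.)
 */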

/** Return a newly allocated string describing <b>auth</b>. Only describes
 * authority features. */
STATIC char *
authdir_type_to_string(dirinfo_type_t auth)
{
  char *result;
  smartlist_t *lst = smartlist_new();
  if (auth & V3_DIRINFO)
    smartlist_add(lst, (void*)"V3");
  if (auth & BRIDGE_DIRINFO)
    smartlist_add(lst, (void*)"Bridge");
  if (smartlist_len(lst)) {
    result = smartlist_join_strings(lst, ", ", 0, NULL);
  } else {
    result = tor_strdup("[Not an authority]");
  }
  smartlist_free(lst);
  return result;
}
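
/* Editorial example (not in the original source):
 * authdir_type_to_string(V3_DIRINFO|BRIDGE_DIRINFO) returns the
 * caller-owned (tor_free()able) string "V3, Bridge", while a value with
 * neither flag set yields "[Not an authority]". */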

/** Return a string describing a given directory connection purpose. */
STATIC const char *
dir_conn_purpose_to_string(int purpose)
{
  switch (purpose)
    {
    case DIR_PURPOSE_UPLOAD_DIR:
      return "server descriptor upload";
    case DIR_PURPOSE_UPLOAD_VOTE:
      return "server vote upload";
    case DIR_PURPOSE_UPLOAD_SIGNATURES:
      return "consensus signature upload";
    case DIR_PURPOSE_FETCH_SERVERDESC:
      return "server descriptor fetch";
    case DIR_PURPOSE_FETCH_EXTRAINFO:
      return "extra-info fetch";
    case DIR_PURPOSE_FETCH_CONSENSUS:
      return "consensus network-status fetch";
    case DIR_PURPOSE_FETCH_CERTIFICATE:
      return "authority cert fetch";
    case DIR_PURPOSE_FETCH_STATUS_VOTE:
      return "status vote fetch";
    case DIR_PURPOSE_FETCH_DETACHED_SIGNATURES:
      return "consensus signature fetch";
    case DIR_PURPOSE_FETCH_RENDDESC_V2:
      return "hidden-service v2 descriptor fetch";
    case DIR_PURPOSE_UPLOAD_RENDDESC_V2:
      return "hidden-service v2 descriptor upload";
    case DIR_PURPOSE_FETCH_HSDESC:
      return "hidden-service descriptor fetch";
    case DIR_PURPOSE_UPLOAD_HSDESC:
      return "hidden-service descriptor upload";
    case DIR_PURPOSE_FETCH_MICRODESC:
      return "microdescriptor fetch";
    }

  log_warn(LD_BUG, "Called with unknown purpose %d", purpose);
  return "(unknown)";
}

/** Return the requisite directory information types. */
STATIC dirinfo_type_t
dir_fetch_type(int dir_purpose, int router_purpose, const char *resource)
{
  dirinfo_type_t type;
  switch (dir_purpose) {
    case DIR_PURPOSE_FETCH_EXTRAINFO:
      type = EXTRAINFO_DIRINFO;
      if (router_purpose == ROUTER_PURPOSE_BRIDGE)
        type |= BRIDGE_DIRINFO;
      else
        type |= V3_DIRINFO;
      break;
    case DIR_PURPOSE_FETCH_SERVERDESC:
      if (router_purpose == ROUTER_PURPOSE_BRIDGE)
        type = BRIDGE_DIRINFO;
      else
        type = V3_DIRINFO;
      break;
    case DIR_PURPOSE_FETCH_STATUS_VOTE:
    case DIR_PURPOSE_FETCH_DETACHED_SIGNATURES:
    case DIR_PURPOSE_FETCH_CERTIFICATE:
      type = V3_DIRINFO;
      break;
    case DIR_PURPOSE_FETCH_CONSENSUS:
      type = V3_DIRINFO;
      if (resource && !strcmp(resource, "microdesc"))
        type |= MICRODESC_DIRINFO;
      break;
    case DIR_PURPOSE_FETCH_MICRODESC:
      type = MICRODESC_DIRINFO;
      break;
    default:
      log_warn(LD_BUG, "Unexpected purpose %d", (int)dir_purpose);
      type = NO_DIRINFO;
      break;
  }
  return type;
}
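
/* Editorial example (not in the original source): a client fetching the
 * microdescriptor-flavored consensus calls
 *
 *   dir_fetch_type(DIR_PURPOSE_FETCH_CONSENSUS, ROUTER_PURPOSE_GENERAL,
 *                  "microdesc")
 *
 * and gets back V3_DIRINFO|MICRODESC_DIRINFO, while a bridge descriptor
 * fetch (DIR_PURPOSE_FETCH_SERVERDESC with ROUTER_PURPOSE_BRIDGE) maps to
 * BRIDGE_DIRINFO alone.  (ROUTER_PURPOSE_GENERAL is assumed for the
 * ordinary-client case.)
 */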

/** Return true iff <b>identity_digest</b> is the digest of a router which
 * says that it caches extrainfos.  (If <b>is_authority</b> we always
 * believe that to be true.) */
int
router_supports_extrainfo(const char *identity_digest, int is_authority)
{
  const node_t *node = node_get_by_id(identity_digest);

  if (node && node->ri) {
    if (node->ri->caches_extra_info)
      return 1;
  }
  if (is_authority) {
    return 1;
  }
  return 0;
}

/** Return true iff any trusted directory authority has accepted our
 * server descriptor.
 *
 * We consider any authority sufficient because waiting for all of
 * them means it never happens while any authority is down; we don't
 * go for something more complex in the middle (like \>1/3 or \>1/2 or
 * \>=1/2) because that doesn't seem necessary yet.
 */
int
directories_have_accepted_server_descriptor(void)
{
  const smartlist_t *servers = router_get_trusted_dir_servers();
  const or_options_t *options = get_options();
  SMARTLIST_FOREACH(servers, dir_server_t *, d, {
    if ((d->type & options->PublishServerDescriptor_) &&
        d->has_accepted_serverdesc) {
      return 1;
    }
  });
  return 0;
}

/** Start a connection to every suitable directory authority, using
 * connection purpose <b>dir_purpose</b> and uploading <b>payload</b>
 * (of length <b>payload_len</b>). The dir_purpose should be one of
 * 'DIR_PURPOSE_UPLOAD_{DIR|VOTE|SIGNATURES}'.
 *
 * <b>router_purpose</b> describes the type of descriptor we're
 * publishing, if we're publishing a descriptor -- e.g. general or bridge.
 *
 * <b>type</b> specifies what sort of dir authorities (V3,
 * BRIDGE, etc) we should upload to.
 *
 * If <b>extrainfo_len</b> is nonzero, the first <b>payload_len</b> bytes of
 * <b>payload</b> hold a router descriptor, and the next <b>extrainfo_len</b>
 * bytes of <b>payload</b> hold an extra-info document.  Upload the descriptor
 * to all authorities, and the extra-info document to all authorities that
 * support it.
 */
void
directory_post_to_dirservers(uint8_t dir_purpose, uint8_t router_purpose,
                             dirinfo_type_t type,
                             const char *payload,
                             size_t payload_len, size_t extrainfo_len)
{
  const or_options_t *options = get_options();
  dir_indirection_t indirection;
  const smartlist_t *dirservers = router_get_trusted_dir_servers();
  int found = 0;
  const int exclude_self = (dir_purpose == DIR_PURPOSE_UPLOAD_VOTE ||
                            dir_purpose == DIR_PURPOSE_UPLOAD_SIGNATURES);
  tor_assert(dirservers);
  /* This tries dirservers which we believe to be down, but ultimately, that's
   * harmless, and we may as well err on the side of getting things uploaded.
   */
  SMARTLIST_FOREACH_BEGIN(dirservers, dir_server_t *, ds) {
    routerstatus_t *rs = &(ds->fake_status);
    size_t upload_len = payload_len;

    if ((type & ds->type) == 0)
      continue;

    if (exclude_self && router_digest_is_me(ds->digest)) {
      /* We don't upload to ourselves, but there is now at least one
       * authority of this type that has what we wanted to upload. */
      found = 1;
      continue;
    }

    if (options->StrictNodes &&
        routerset_contains_routerstatus(options->ExcludeNodes, rs, -1)) {
      log_warn(LD_DIR, "Wanted to contact authority '%s' for %s, but "
               "it's in our ExcludedNodes list and StrictNodes is set. "
               "Skipping.",
               ds->nickname,
               dir_conn_purpose_to_string(dir_purpose));
      continue;
    }

    found = 1; /* at least one authority of this type was listed */
    if (dir_purpose == DIR_PURPOSE_UPLOAD_DIR)
      ds->has_accepted_serverdesc = 0;

    if (extrainfo_len && router_supports_extrainfo(ds->digest, 1)) {
      upload_len += extrainfo_len;
      log_info(LD_DIR, "Uploading an extrainfo too (length %d)",
               (int) extrainfo_len);
    }
    if (purpose_needs_anonymity(dir_purpose, router_purpose, NULL)) {
      indirection = DIRIND_ANONYMOUS;
    } else if (!fascist_firewall_allows_dir_server(ds,
                                                   FIREWALL_DIR_CONNECTION,
                                                   0)) {
      if (fascist_firewall_allows_dir_server(ds, FIREWALL_OR_CONNECTION, 0))
        indirection = DIRIND_ONEHOP;
      else
        indirection = DIRIND_ANONYMOUS;
    } else {
      indirection = DIRIND_DIRECT_CONN;
    }

    directory_request_t *req = directory_request_new(dir_purpose);
    directory_request_set_routerstatus(req, rs);
    directory_request_set_router_purpose(req, router_purpose);
    directory_request_set_indirection(req, indirection);
    directory_request_set_payload(req, payload, upload_len);
    directory_initiate_request(req);
    directory_request_free(req);
  } SMARTLIST_FOREACH_END(ds);
  if (!found) {
    char *s = authdir_type_to_string(type);
    log_warn(LD_DIR, "Publishing server descriptor to directory authorities "
             "of type '%s', but no authorities of that type listed!", s);
    tor_free(s);
  }
}
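
/* Editorial sketch (not in the original source) of how a relay-side caller
 * might invoke the uploader above.  The buffer layout -- the signed
 * descriptor followed immediately by its extra-info document -- matches the
 * doc comment; the variable names are assumptions for illustration:
 *
 *   directory_post_to_dirservers(DIR_PURPOSE_UPLOAD_DIR,
 *                                ROUTER_PURPOSE_GENERAL,
 *                                V3_DIRINFO,
 *                                desc_and_ei,           // descriptor + EI
 *                                desc_len, extrainfo_len);
 */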

/** Return true iff, according to the values in <b>options</b>, we should be
 * using directory guards for direct downloads of directory information. */
STATIC int
should_use_directory_guards(const or_options_t *options)
{
  /* Public (non-bridge) servers never use directory guards. */
  if (public_server_mode(options))
    return 0;
  /* If guards are disabled, we can't use directory guards. */
  if (!options->UseEntryGuards)
    return 0;
  /* If we're configured to fetch directory info aggressively or of a
   * nonstandard type, don't use directory guards. */
  if (options->DownloadExtraInfo || options->FetchDirInfoEarly ||
      options->FetchDirInfoExtraEarly || options->FetchUselessDescriptors)
    return 0;
  return 1;
}

/** Pick an unconstrained directory server from among our guards, the latest
 * networkstatus, or the fallback dirservers, for use in downloading
 * information of type <b>type</b>, and return its routerstatus. */
static const routerstatus_t *
directory_pick_generic_dirserver(dirinfo_type_t type, int pds_flags,
                                 uint8_t dir_purpose,
                                 circuit_guard_state_t **guard_state_out)
{
  const routerstatus_t *rs = NULL;
  const or_options_t *options = get_options();

  if (options->UseBridges)
    log_warn(LD_BUG, "Called when we have UseBridges set.");

  if (should_use_directory_guards(options)) {
    const node_t *node = guards_choose_dirguard(dir_purpose, guard_state_out);
    if (node)
      rs = node->rs;
  } else {
    /* anybody with a non-zero dirport will do */
    rs = router_pick_directory_server(type, pds_flags);
  }
  if (!rs) {
    log_info(LD_DIR, "No router found for %s; falling back to "
             "dirserver list.", dir_conn_purpose_to_string(dir_purpose));
    rs = router_pick_fallback_dirserver(type, pds_flags);
  }

  return rs;
}

/**
 * Set the extra fields in <b>req</b> that are used when requesting a
 * consensus of type <b>resource</b>.
 *
 * Right now, these fields are if-modified-since and x-or-diff-from-consensus.
 */
static void
dir_consensus_request_set_additional_headers(directory_request_t *req,
                                             const char *resource)
{
  time_t if_modified_since = 0;
  uint8_t or_diff_from[DIGEST256_LEN];
  int or_diff_from_is_set = 0;

  /* DEFAULT_IF_MODIFIED_SINCE_DELAY is 1/20 of the default consensus
   * period of 1 hour.
   */
  const int DEFAULT_IF_MODIFIED_SINCE_DELAY = 180;
  const int32_t DEFAULT_TRY_DIFF_FOR_CONSENSUS_NEWER = 72;
  const int32_t MIN_TRY_DIFF_FOR_CONSENSUS_NEWER = 0;
  const int32_t MAX_TRY_DIFF_FOR_CONSENSUS_NEWER = 8192;
  const char TRY_DIFF_FOR_CONSENSUS_NEWER_NAME[] =
    "try-diff-for-consensus-newer-than";

  int flav = FLAV_NS;
  if (resource)
    flav = networkstatus_parse_flavor_name(resource);

  int32_t max_age_for_diff = 3600 *
    networkstatus_get_param(NULL,
                            TRY_DIFF_FOR_CONSENSUS_NEWER_NAME,
                            DEFAULT_TRY_DIFF_FOR_CONSENSUS_NEWER,
                            MIN_TRY_DIFF_FOR_CONSENSUS_NEWER,
                            MAX_TRY_DIFF_FOR_CONSENSUS_NEWER);

  if (flav != -1) {
    /* If we have a parsed consensus of this type, we can do an
     * if-modified-since based on it. */
    networkstatus_t *v;
    v = networkstatus_get_latest_consensus_by_flavor(flav);
    if (v) {
      /* In networks with particularly short V3AuthVotingIntervals,
       * ask for the consensus if it's been modified since half the
       * V3AuthVotingInterval of the most recent consensus. */
      time_t ims_delay = DEFAULT_IF_MODIFIED_SINCE_DELAY;
      if (v->fresh_until > v->valid_after
          && ims_delay > (v->fresh_until - v->valid_after)/2) {
        ims_delay = (v->fresh_until - v->valid_after)/2;
      }
      if_modified_since = v->valid_after + ims_delay;
      if (v->valid_after >= approx_time() - max_age_for_diff) {
        memcpy(or_diff_from, v->digest_sha3_as_signed, DIGEST256_LEN);
        or_diff_from_is_set = 1;
      }
    }
  } else {
    /* Otherwise it might be a consensus we don't parse, but which we
     * do cache.  Look at the cached copy, perhaps. */
    cached_dir_t *cd = dirserv_get_consensus(resource);
    /* We have no method of determining the voting interval from an
     * unparsed consensus, so we use the default. */
    if (cd) {
      if_modified_since = cd->published + DEFAULT_IF_MODIFIED_SINCE_DELAY;
      if (cd->published >= approx_time() - max_age_for_diff) {
        memcpy(or_diff_from, cd->digest_sha3_as_signed, DIGEST256_LEN);
        or_diff_from_is_set = 1;
      }
    }
  }

  if (if_modified_since > 0)
    directory_request_set_if_modified_since(req, if_modified_since);
  if (or_diff_from_is_set) {
    char hex[HEX_DIGEST256_LEN + 1];
    base16_encode(hex, sizeof(hex),
                  (const char*)or_diff_from, sizeof(or_diff_from));
    directory_request_add_header(req, X_OR_DIFF_FROM_CONSENSUS_HEADER, hex);
  }
}

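/* Editorial worked example (not in the original source): with the standard
 * one-hour consensus interval, fresh_until - valid_after is 3600 seconds,
 * so ims_delay stays at DEFAULT_IF_MODIFIED_SINCE_DELAY (180 seconds, i.e.
 * 3600/20) and the request carries an If-Modified-Since value of
 * valid_after + 180 seconds.  In a test network with, say, a 40-second
 * interval, the (fresh_until - valid_after)/2 cap lowers ims_delay to 20
 * seconds instead.
 */
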
/** Start a connection to a random running directory server, using
 * connection purpose <b>dir_purpose</b>, intending to fetch descriptors
 * of purpose <b>router_purpose</b>, and requesting <b>resource</b>.
 * Use <b>pds_flags</b> as arguments to router_pick_directory_server()
 * or router_pick_trusteddirserver().
 */
MOCK_IMPL(void,
directory_get_from_dirserver,(
                            uint8_t dir_purpose,
                            uint8_t router_purpose,
                            const char *resource,
                            int pds_flags,
                            download_want_authority_t want_authority))
{
  const routerstatus_t *rs = NULL;
  const or_options_t *options = get_options();
  int prefer_authority = (directory_fetches_from_authorities(options)
                          || want_authority == DL_WANT_AUTHORITY);
  int require_authority = 0;
  int get_via_tor = purpose_needs_anonymity(dir_purpose, router_purpose,
                                            resource);
  dirinfo_type_t type = dir_fetch_type(dir_purpose, router_purpose, resource);

  if (type == NO_DIRINFO)
    return;

  if (!options->FetchServerDescriptors)
    return;

  circuit_guard_state_t *guard_state = NULL;
  if (!get_via_tor) {
    if (options->UseBridges && !(type & BRIDGE_DIRINFO)) {
      /* We want to ask a running bridge for which we have a descriptor.
       *
       * When we ask guards_choose_dirguard() for a bridge, we specify what
       * sort of dir fetch we'll be doing, so it won't return a bridge
       * that can't answer our question.
       */
      const node_t *node = guards_choose_dirguard(dir_purpose, &guard_state);
      if (node && node->ri) {
        /* every bridge has a routerinfo. */
        routerinfo_t *ri = node->ri;
        /* clients always make OR connections to bridges */
        tor_addr_port_t or_ap;
        directory_request_t *req = directory_request_new(dir_purpose);
        /* we are willing to use a non-preferred address if we need to */
        fascist_firewall_choose_address_node(node, FIREWALL_OR_CONNECTION, 0,
                                             &or_ap);
        directory_request_set_or_addr_port(req, &or_ap);
        directory_request_set_directory_id_digest(req,
                                            ri->cache_info.identity_digest);
        directory_request_set_router_purpose(req, router_purpose);
        directory_request_set_resource(req, resource);
        if (dir_purpose == DIR_PURPOSE_FETCH_CONSENSUS)
          dir_consensus_request_set_additional_headers(req, resource);
        directory_request_set_guard_state(req, guard_state);
        directory_initiate_request(req);
        directory_request_free(req);
      } else {
        if (guard_state) {
          entry_guard_cancel(&guard_state);
        }
        log_notice(LD_DIR, "Ignoring directory request, since no bridge "
                   "nodes are available yet.");
      }

      return;
    } else {
      if (prefer_authority || (type & BRIDGE_DIRINFO)) {
        /* only ask authdirservers, and don't ask myself */
        rs = router_pick_trusteddirserver(type, pds_flags);
        if (rs == NULL && (pds_flags & (PDS_NO_EXISTING_SERVERDESC_FETCH|
                                        PDS_NO_EXISTING_MICRODESC_FETCH))) {
          /* We don't want to fetch from any authorities that we're currently
           * fetching server descriptors from, and we got no match.  Did we
           * get no match because all the authorities have connections
           * fetching server descriptors (in which case we should just
           * return), or because all the authorities are down or on fire or
           * unreachable or something (in which case we should go on with
           * our fallback code)? */
          pds_flags &= ~(PDS_NO_EXISTING_SERVERDESC_FETCH|
                         PDS_NO_EXISTING_MICRODESC_FETCH);
          rs = router_pick_trusteddirserver(type, pds_flags);
          if (rs) {
            log_debug(LD_DIR, "Deferring serverdesc fetch: all authorities "
                      "are in use.");
            return;
          }
        }
        if (rs == NULL && require_authority) {
          log_info(LD_DIR, "No authorities were available for %s: will try "
                   "later.", dir_conn_purpose_to_string(dir_purpose));
          return;
        }
      }
      if (!rs && !(type & BRIDGE_DIRINFO)) {
        rs = directory_pick_generic_dirserver(type, pds_flags,
                                              dir_purpose,
                                              &guard_state);
        if (!rs)
          get_via_tor = 1; /* last resort: try routing it via Tor */
      }
    }
  }

  if (get_via_tor) {
    /* Never use fascistfirewall; we're going via Tor. */
    pds_flags |= PDS_IGNORE_FASCISTFIREWALL;
    rs = router_pick_directory_server(type, pds_flags);
  }

  /* If we have any hope of building an indirect conn, we know some router
   * descriptors.  If (rs==NULL), we can't build circuits anyway, so
   * there's no point in falling back to the authorities in this case. */
  if (rs) {
    const dir_indirection_t indirection =
      get_via_tor ? DIRIND_ANONYMOUS : DIRIND_ONEHOP;
    directory_request_t *req = directory_request_new(dir_purpose);
    directory_request_set_routerstatus(req, rs);
    directory_request_set_router_purpose(req, router_purpose);
    directory_request_set_indirection(req, indirection);
    directory_request_set_resource(req, resource);
    if (dir_purpose == DIR_PURPOSE_FETCH_CONSENSUS)
      dir_consensus_request_set_additional_headers(req, resource);
    if (guard_state)
      directory_request_set_guard_state(req, guard_state);
    directory_initiate_request(req);
    directory_request_free(req);
  } else {
    log_notice(LD_DIR,
               "While fetching directory info, "
               "no running dirservers known. Will try again later. "
               "(purpose %d)", dir_purpose);
    if (!purpose_needs_anonymity(dir_purpose, router_purpose, resource)) {
      /* remember we tried them all and failed. */
      directory_all_unreachable(time(NULL));
    }
  }
}
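
/* Editorial sketch (not in the original source): a typical client-side call
 * requesting the microdescriptor consensus from whatever directory server
 * is suitable.  The pds/want flags shown (PDS_RETRY_IF_NO_SERVERS,
 * DL_WANT_ANY_DIRSERVER) are assumptions about the caller's intent:
 *
 *   directory_get_from_dirserver(DIR_PURPOSE_FETCH_CONSENSUS,
 *                                ROUTER_PURPOSE_GENERAL,
 *                                "microdesc",
 *                                PDS_RETRY_IF_NO_SERVERS,
 *                                DL_WANT_ANY_DIRSERVER);
 */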

/** As directory_get_from_dirserver, but initiates a request to <i>every</i>
 * directory authority other than ourself.  Only for use by authorities when
 * searching for missing information while voting. */
void
directory_get_from_all_authorities(uint8_t dir_purpose,
                                   uint8_t router_purpose,
                                   const char *resource)
{
  tor_assert(dir_purpose == DIR_PURPOSE_FETCH_STATUS_VOTE ||
             dir_purpose == DIR_PURPOSE_FETCH_DETACHED_SIGNATURES);

  SMARTLIST_FOREACH_BEGIN(router_get_trusted_dir_servers(),
                          dir_server_t *, ds) {
    if (router_digest_is_me(ds->digest))
      continue;
    if (!(ds->type & V3_DIRINFO))
      continue;
    const routerstatus_t *rs = &ds->fake_status;
    directory_request_t *req = directory_request_new(dir_purpose);
    directory_request_set_routerstatus(req, rs);
    directory_request_set_router_purpose(req, router_purpose);
    directory_request_set_resource(req, resource);
    directory_initiate_request(req);
    directory_request_free(req);
  } SMARTLIST_FOREACH_END(ds);
}

/** Return true iff <b>ind</b> requires a multihop circuit. */
static int
dirind_is_anon(dir_indirection_t ind)
{
  return ind == DIRIND_ANON_DIRPORT || ind == DIRIND_ANONYMOUS;
}

/* Choose reachable OR and Dir addresses and ports from status, copying them
 * into use_or_ap and use_dir_ap. If indirection is anonymous, then we're
 * connecting via another relay, so choose the primary IPv4 address and ports.
 *
 * status should have at least one reachable address; if we can't choose a
 * reachable address, warn and return -1. Otherwise, return 0.
 */
static int
directory_choose_address_routerstatus(const routerstatus_t *status,
                                      dir_indirection_t indirection,
                                      tor_addr_port_t *use_or_ap,
                                      tor_addr_port_t *use_dir_ap)
{
  tor_assert(status != NULL);
  tor_assert(use_or_ap != NULL);
  tor_assert(use_dir_ap != NULL);

  const or_options_t *options = get_options();
  int have_or = 0, have_dir = 0;

  /* We expect status to have at least one reachable address if we're
   * connecting to it directly.
   *
   * Therefore, we can simply use the other address if the one we want isn't
   * allowed by the firewall.
   *
   * (When Tor uploads and downloads a hidden service descriptor, it uses
   * DIRIND_ANONYMOUS, except for Tor2Web, which uses DIRIND_ONEHOP.
   * So this code will only modify the address for Tor2Web's HS descriptor
   * fetches. Even Single Onion Servers (NYI) use DIRIND_ANONYMOUS, to avoid
   * HSDirs denying service by rejecting descriptors.)
   */

  /* Initialise the OR / Dir addresses */
  tor_addr_make_null(&use_or_ap->addr, AF_UNSPEC);
  use_or_ap->port = 0;
  tor_addr_make_null(&use_dir_ap->addr, AF_UNSPEC);
  use_dir_ap->port = 0;

  /* ORPort connections */
  if (indirection == DIRIND_ANONYMOUS) {
    if (status->addr) {
      /* Since we're going to build a 3-hop circuit and ask the 2nd relay
       * to extend to this address, always use the primary (IPv4) OR address */
      tor_addr_from_ipv4h(&use_or_ap->addr, status->addr);
      use_or_ap->port = status->or_port;
      have_or = 1;
    }
  } else if (indirection == DIRIND_ONEHOP) {
    /* We use an IPv6 address if we have one and we prefer it.
     * Use the preferred address and port if they are reachable, otherwise,
     * use the alternate address and port (if any).
     */
    have_or = fascist_firewall_choose_address_rs(status,
                                                 FIREWALL_OR_CONNECTION, 0,
                                                 use_or_ap);
  }

  /* DirPort connections
   * DIRIND_ONEHOP uses ORPort, but may fall back to the DirPort on relays */
  if (indirection == DIRIND_DIRECT_CONN ||
      indirection == DIRIND_ANON_DIRPORT ||
      (indirection == DIRIND_ONEHOP
       && !directory_must_use_begindir(options))) {
    have_dir = fascist_firewall_choose_address_rs(status,
                                                  FIREWALL_DIR_CONNECTION, 0,
                                                  use_dir_ap);
  }

  /* We rejected all addresses in the relay's status. This means we can't
   * connect to it. */
  if (!have_or && !have_dir) {
    static int logged_backtrace = 0;
    log_info(LD_BUG, "Rejected all OR and Dir addresses from %s when "
             "launching an outgoing directory connection to: IPv4 %s OR %d "
             "Dir %d IPv6 %s OR %d Dir %d", routerstatus_describe(status),
             fmt_addr32(status->addr), status->or_port,
             status->dir_port, fmt_addr(&status->ipv6_addr),
             status->ipv6_orport, status->dir_port);
    if (!logged_backtrace) {
      log_backtrace(LOG_INFO, LD_BUG, "Addresses came from");
      logged_backtrace = 1;
    }
    return -1;
  }

  return 0;
}
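
/* Editorial example (not in the original source): for a relay whose status
 * lists both an IPv4 and an IPv6 ORPort, DIRIND_ANONYMOUS always yields the
 * primary IPv4 OR address above (a middle relay will be asked to extend to
 * it), while DIRIND_ONEHOP lets fascist_firewall_choose_address_rs() pick
 * whichever reachable/preferred address the client can use, possibly the
 * IPv6 ORPort. */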

/** Return true iff <b>conn</b> is the client side of a directory connection
 * we launched to ourself in order to determine the reachability of our
 * dir_port. */
static int
directory_conn_is_self_reachability_test(dir_connection_t *conn)
{
  if (conn->requested_resource &&
      !strcmpstart(conn->requested_resource,"authority")) {
    const routerinfo_t *me = router_get_my_routerinfo();
    if (me &&
        router_digest_is_me(conn->identity_digest) &&
        tor_addr_eq_ipv4h(&conn->base_.addr, me->addr) && /*XXXX prop 118*/
        me->dir_port == conn->base_.port)
      return 1;
  }
  return 0;
}

/** Called when we are unable to complete the client's request to a directory
 * server due to a network error: Mark the router as down and try again if
 * possible.
 */
static void
connection_dir_request_failed(dir_connection_t *conn)
{
  if (conn->guard_state) {
    /* We haven't seen a success on this guard state, so consider it to have
     * failed. */
    entry_guard_failed(&conn->guard_state);
  }
  if (directory_conn_is_self_reachability_test(conn)) {
    return; /* this was a test fetch. don't retry. */
  }
  if (!entry_list_is_constrained(get_options()))
    router_set_status(conn->identity_digest, 0); /* don't try this one again */
  if (conn->base_.purpose == DIR_PURPOSE_FETCH_SERVERDESC ||
      conn->base_.purpose == DIR_PURPOSE_FETCH_EXTRAINFO) {
    log_info(LD_DIR, "Giving up on serverdesc/extrainfo fetch from "
             "directory server at '%s'; retrying",
             conn->base_.address);
    if (conn->router_purpose == ROUTER_PURPOSE_BRIDGE)
      connection_dir_bridge_routerdesc_failed(conn);
    connection_dir_download_routerdesc_failed(conn);
  } else if (conn->base_.purpose == DIR_PURPOSE_FETCH_CONSENSUS) {
    if (conn->requested_resource)
      networkstatus_consensus_download_failed(0, conn->requested_resource);
  } else if (conn->base_.purpose == DIR_PURPOSE_FETCH_CERTIFICATE) {
    log_info(LD_DIR, "Giving up on certificate fetch from directory server "
             "at '%s'; retrying",
             conn->base_.address);
    connection_dir_download_cert_failed(conn, 0);
  } else if (conn->base_.purpose == DIR_PURPOSE_FETCH_DETACHED_SIGNATURES) {
    log_info(LD_DIR, "Giving up downloading detached signatures from '%s'",
             conn->base_.address);
  } else if (conn->base_.purpose == DIR_PURPOSE_FETCH_STATUS_VOTE) {
    log_info(LD_DIR, "Giving up downloading votes from '%s'",
             conn->base_.address);
  } else if (conn->base_.purpose == DIR_PURPOSE_FETCH_MICRODESC) {
    log_info(LD_DIR, "Giving up on downloading microdescriptors from "
             "directory server at '%s'; will retry", conn->base_.address);
    connection_dir_download_routerdesc_failed(conn);
  }
}
/** Helper: Attempt to fetch directly the descriptors of each bridge
 * listed in <b>descs</b>.
 */
static void
connection_dir_retry_bridges(smartlist_t *descs)
{
  char digest[DIGEST_LEN];
  SMARTLIST_FOREACH(descs, const char *, cp,
  {
    if (base16_decode(digest, DIGEST_LEN, cp, strlen(cp)) != DIGEST_LEN) {
      log_warn(LD_BUG, "Malformed fingerprint in list: %s",
               escaped(cp));
      continue;
    }
    retry_bridge_descriptor_fetch_directly(digest);
  });
}

/** Called when an attempt to download one or more router descriptors
 * or extra-info documents on connection <b>conn</b> failed.
 */
static void
connection_dir_download_routerdesc_failed(dir_connection_t *conn)
{
  /* No need to increment the failure count for routerdescs, since
   * it's not their fault. */

  /* No need to relaunch descriptor downloads here: we already do it
   * every 10 or 60 seconds (FOO_DESCRIPTOR_RETRY_INTERVAL) in main.c. */
  tor_assert(conn->base_.purpose == DIR_PURPOSE_FETCH_SERVERDESC ||
             conn->base_.purpose == DIR_PURPOSE_FETCH_EXTRAINFO ||
             conn->base_.purpose == DIR_PURPOSE_FETCH_MICRODESC);

  (void) conn;
}

/** Called when an attempt to download a bridge's routerdesc from
 * one of the authorities failed due to a network error. If
 * possible attempt to download descriptors from the bridge directly.
 */
static void
connection_dir_bridge_routerdesc_failed(dir_connection_t *conn)
{
  smartlist_t *which = NULL;

  /* Requests for bridge descriptors are in the form 'fp/', so ignore
     anything else. */
  if (!conn->requested_resource || strcmpstart(conn->requested_resource,"fp/"))
    return;

  which = smartlist_new();
  dir_split_resource_into_fingerprints(conn->requested_resource
                                        + strlen("fp/"),
                                       which, NULL, 0);

  tor_assert(conn->base_.purpose != DIR_PURPOSE_FETCH_EXTRAINFO);
  if (smartlist_len(which)) {
    connection_dir_retry_bridges(which);
    SMARTLIST_FOREACH(which, char *, cp, tor_free(cp));
  }
  smartlist_free(which);
}

/** Called when an attempt to fetch a certificate fails. */
static void
connection_dir_download_cert_failed(dir_connection_t *conn, int status)
{
  const char *fp_pfx = "fp/";
  const char *fpsk_pfx = "fp-sk/";
  smartlist_t *failed;
  tor_assert(conn->base_.purpose == DIR_PURPOSE_FETCH_CERTIFICATE);

  if (!conn->requested_resource)
    return;
  failed = smartlist_new();
  /*
   * We have two cases: download by fingerprint (resource starts
   * with "fp/") or download by fingerprint/signing key pair
   * (resource starts with "fp-sk/").
   */
  if (!strcmpstart(conn->requested_resource, fp_pfx)) {
    /* Download by fingerprint case */
    dir_split_resource_into_fingerprints(conn->requested_resource +
                                         strlen(fp_pfx),
                                         failed, NULL, DSR_HEX);
    SMARTLIST_FOREACH_BEGIN(failed, char *, cp) {
      /* Null signing key digest indicates download by fp only */
      authority_cert_dl_failed(cp, NULL, status);
      tor_free(cp);
    } SMARTLIST_FOREACH_END(cp);
  } else if (!strcmpstart(conn->requested_resource, fpsk_pfx)) {
    /* Download by (fp,sk) pairs */
    dir_split_resource_into_fingerprint_pairs(conn->requested_resource +
                                              strlen(fpsk_pfx), failed);
    SMARTLIST_FOREACH_BEGIN(failed, fp_pair_t *, cp) {
      authority_cert_dl_failed(cp->first, cp->second, status);
      tor_free(cp);
    } SMARTLIST_FOREACH_END(cp);
  } else {
    log_warn(LD_DIR,
             "Don't know what to do with failure for cert fetch %s",
             conn->requested_resource);
  }

  smartlist_free(failed);

  update_certificate_downloads(time(NULL));
}

/* Should this tor instance only use begindir for all its directory requests?
 */
int
directory_must_use_begindir(const or_options_t *options)
{
  /* Clients, onion services, and bridges must use begindir,
   * relays and authorities do not have to */
  return !public_server_mode(options);
}

/** Evaluate the situation and decide if we should use an encrypted
 * "begindir-style" connection for this directory request.
 * 0) If there is no DirPort, yes.
 * 1) If or_port is 0, or it's a direct conn and or_port is firewalled
 *    or we're a dir mirror, no.
 * 2) If we prefer to avoid begindir conns, and we're not fetching or
 *    publishing a bridge relay descriptor, no.
 * 3) Else yes.
 * If returning 0, return in *reason why we can't use begindir.
 * reason must not be NULL.
 */
static int
directory_command_should_use_begindir(const or_options_t *options,
                                      const directory_request_t *req,
                                      const char **reason)
{
  const tor_addr_t *or_addr = &req->or_addr_port.addr;
  //const tor_addr_t *dir_addr = &req->dir_addr_port.addr;
  const int or_port = req->or_addr_port.port;
  const int dir_port = req->dir_addr_port.port;

  const dir_indirection_t indirection = req->indirection;

  tor_assert(reason);
  *reason = NULL;

  /* Reasons why we must use begindir */
  if (!dir_port) {
    *reason = "(using begindir - directory with no DirPort)";
    return 1; /* We don't know a DirPort -- must begindir. */
  }
  /* Reasons why we can't possibly use begindir */
  if (!or_port) {
    *reason = "directory with unknown ORPort";
    return 0; /* We don't know an ORPort -- no chance. */
  }
  if (indirection == DIRIND_DIRECT_CONN ||
      indirection == DIRIND_ANON_DIRPORT) {
    *reason = "DirPort connection";
    return 0;
  }
  if (indirection == DIRIND_ONEHOP) {
    /* We're firewalled and want a direct OR connection */
    if (!fascist_firewall_allows_address_addr(or_addr, or_port,
                                              FIREWALL_OR_CONNECTION, 0, 0)) {
      *reason = "ORPort not reachable";
      return 0;
    }
  }
  /* Reasons why we want to avoid using begindir */
  if (indirection == DIRIND_ONEHOP) {
    if (!directory_must_use_begindir(options)) {
      *reason = "in relay mode";
      return 0;
    }
  }
  /* DIRIND_ONEHOP on a client, or DIRIND_ANONYMOUS
   */
  *reason = "(using begindir)";
  return 1;
}

/**
 * Create and return a new directory_request_t with purpose
 * <b>dir_purpose</b>.
 */
directory_request_t *
directory_request_new(uint8_t dir_purpose)
{
  tor_assert(dir_purpose >= DIR_PURPOSE_MIN_);
  tor_assert(dir_purpose <= DIR_PURPOSE_MAX_);
  tor_assert(dir_purpose != DIR_PURPOSE_SERVER);
  tor_assert(dir_purpose != DIR_PURPOSE_HAS_FETCHED_RENDDESC_V2);
  tor_assert(dir_purpose != DIR_PURPOSE_HAS_FETCHED_HSDESC);

  directory_request_t *result = tor_malloc_zero(sizeof(*result));
  tor_addr_make_null(&result->or_addr_port.addr, AF_INET);
  result->or_addr_port.port = 0;
  tor_addr_make_null(&result->dir_addr_port.addr, AF_INET);
  result->dir_addr_port.port = 0;
  result->dir_purpose = dir_purpose;
  result->router_purpose = ROUTER_PURPOSE_GENERAL;
  result->indirection = DIRIND_ONEHOP;
  return result;
}
/**
 * Release all resources held by <b>req</b>.
 */
void
directory_request_free_(directory_request_t *req)
{
  if (req == NULL)
    return;
  config_free_lines(req->additional_headers);
  tor_free(req);
}
/**
 * Set the address and OR port to use for this directory request. If there is
 * no OR port, we'll have to connect over the dirport. (If there are both,
 * the indirection setting determines which to use.)
 */
void
directory_request_set_or_addr_port(directory_request_t *req,
                                   const tor_addr_port_t *p)
{
  memcpy(&req->or_addr_port, p, sizeof(*p));
}
/**
 * Set the address and dirport to use for this directory request. If there
 * is no dirport, we'll have to connect over the OR port. (If there are both,
 * the indirection setting determines which to use.)
 */
void
directory_request_set_dir_addr_port(directory_request_t *req,
                                    const tor_addr_port_t *p)
{
  memcpy(&req->dir_addr_port, p, sizeof(*p));
}
/**
 * Set the RSA identity digest of the directory to use for this directory
 * request.
 */
void
directory_request_set_directory_id_digest(directory_request_t *req,
                                          const char *digest)
{
  memcpy(req->digest, digest, DIGEST_LEN);
}
/**
 * Set the router purpose associated with uploaded and downloaded router
 * descriptors and extrainfo documents in this directory request. The purpose
 * must be one of ROUTER_PURPOSE_GENERAL (the default) or
 * ROUTER_PURPOSE_BRIDGE.
 */
void
directory_request_set_router_purpose(directory_request_t *req,
                                     uint8_t router_purpose)
{
  tor_assert(router_purpose == ROUTER_PURPOSE_GENERAL ||
             router_purpose == ROUTER_PURPOSE_BRIDGE);
  // assert that it actually makes sense to set this purpose, given
  // the dir_purpose.
  req->router_purpose = router_purpose;
}
/**
 * Set the indirection to be used for the directory request. The indirection
 * parameter configures whether to connect to a DirPort or ORPort, and whether
 * to anonymize the connection. DIRIND_ONEHOP (use ORPort, don't anonymize)
 * is the default. See dir_indirection_t for more information.
 */
void
directory_request_set_indirection(directory_request_t *req,
                                  dir_indirection_t indirection)
{
  req->indirection = indirection;
}

/**
 * Set a pointer to the resource to request from a directory. Different
 * request types use resources to indicate different components of their URL.
 * Note that only an alias to <b>resource</b> is stored, so the
 * <b>resource</b> must outlive the request.
 */
void
directory_request_set_resource(directory_request_t *req,
                               const char *resource)
{
  req->resource = resource;
}
/**
 * Set a pointer to the payload to include with this directory request, along
 * with its length. Note that only an alias to <b>payload</b> is stored, so
 * the <b>payload</b> must outlive the request.
 */
void
directory_request_set_payload(directory_request_t *req,
                              const char *payload,
                              size_t payload_len)
{
  tor_assert(DIR_PURPOSE_IS_UPLOAD(req->dir_purpose));

  req->payload = payload;
  req->payload_len = payload_len;
}
/**
 * Set an if-modified-since date to send along with the request. The
 * default is 0 (meaning, send no if-modified-since header).
 */
void
directory_request_set_if_modified_since(directory_request_t *req,
                                        time_t if_modified_since)
{
  req->if_modified_since = if_modified_since;
}

/** Include a header of name <b>key</b> with content <b>val</b> in the
 * request. Neither may include newlines or other odd characters. Their
 * ordering is not currently guaranteed.
 *
 * Note that, as elsewhere in this module, header keys include a trailing
 * colon and space.
 */
void
directory_request_add_header(directory_request_t *req,
                             const char *key,
                             const char *val)
{
  config_line_prepend(&req->additional_headers, key, val);
}
/**
 * Set an object containing HS data to be associated with this request. Note
 * that only an alias to <b>query</b> is stored, so the <b>query</b> object
 * must outlive the request.
 */
void
directory_request_set_rend_query(directory_request_t *req,
                                 const rend_data_t *query)
{
  if (query) {
    tor_assert(req->dir_purpose == DIR_PURPOSE_FETCH_RENDDESC_V2 ||
               req->dir_purpose == DIR_PURPOSE_UPLOAD_RENDDESC_V2);
  }
  req->rend_query = query;
}
/**
 * Set an object containing HS connection identifier to be associated with
 * this request. Note that only an alias to <b>ident</b> is stored, so the
 * <b>ident</b> object must outlive the request.
 */
void
directory_request_upload_set_hs_ident(directory_request_t *req,
                                      const hs_ident_dir_conn_t *ident)
{
  if (ident) {
    tor_assert(req->dir_purpose == DIR_PURPOSE_UPLOAD_HSDESC);
  }
  req->hs_ident = ident;
}
/**
 * Set an object containing HS connection identifier to be associated with
 * this fetch request. Note that only an alias to <b>ident</b> is stored, so
 * the <b>ident</b> object must outlive the request.
 */
void
directory_request_fetch_set_hs_ident(directory_request_t *req,
                                     const hs_ident_dir_conn_t *ident)
{
  if (ident) {
    tor_assert(req->dir_purpose == DIR_PURPOSE_FETCH_HSDESC);
  }
  req->hs_ident = ident;
}
/** Set a static circuit_guard_state_t object to affiliate with the request
 * in <b>req</b>. This object will receive notification when the attempt to
 * connect to the guard either succeeds or fails. */
void
directory_request_set_guard_state(directory_request_t *req,
                                  circuit_guard_state_t *state)
{
  req->guard_state = state;
}

/**
 * Internal: Return true if any information for contacting the directory in
 * <b>req</b> has been set, other than by the routerstatus. */
static int
directory_request_dir_contact_info_specified(const directory_request_t *req)
{
  /* We only check for ports here, since we don't use an addr unless the port
   * is set */
  return (req->or_addr_port.port ||
          req->dir_addr_port.port ||
          ! tor_digest_is_zero(req->digest));
}

/**
 * Set the routerstatus to use for the directory associated with this
 * request. If this option is set, then no other function to set the
 * directory's address or identity should be called.
 */
void
directory_request_set_routerstatus(directory_request_t *req,
                                   const routerstatus_t *status)
{
  req->routerstatus = status;
}
/**
 * Helper: update the addresses, ports, and identities in <b>req</b>
 * from the routerstatus object in <b>req</b>. Return 0 on success.
 * On failure, warn and return -1.
 */
static int
directory_request_set_dir_from_routerstatus(directory_request_t *req)
{
  const routerstatus_t *status = req->routerstatus;
  if (BUG(status == NULL))
    return -1;
  const or_options_t *options = get_options();
  const node_t *node;
  tor_addr_port_t use_or_ap, use_dir_ap;
  const int anonymized_connection = dirind_is_anon(req->indirection);

  tor_assert(status != NULL);

  node = node_get_by_id(status->identity_digest);

  /* XXX The below check is wrong: !node means it's not in the consensus,
   * but we haven't checked if we have a descriptor for it -- and also,
   * we only care about the descriptor if it's a begindir-style anonymized
   * connection. */
  if (!node && anonymized_connection) {
    log_info(LD_DIR, "Not sending anonymized request to directory '%s'; we "
             "don't have its router descriptor.",
             routerstatus_describe(status));
    return -1;
  }

  if (options->ExcludeNodes && options->StrictNodes &&
      routerset_contains_routerstatus(options->ExcludeNodes, status, -1)) {
    log_warn(LD_DIR, "Wanted to contact directory mirror %s for %s, but "
             "it's in our ExcludedNodes list and StrictNodes is set. "
             "Skipping. This choice might make your Tor not work.",
             routerstatus_describe(status),
             dir_conn_purpose_to_string(req->dir_purpose));
    return -1;
  }

  /* At this point, if we are a client making a direct connection to a
   * directory server, we have selected a server that has at least one address
   * allowed by ClientUseIPv4/6 and Reachable{"",OR,Dir}Addresses. This
   * selection uses the preference in ClientPreferIPv6{OR,Dir}Port, if
   * possible. (If UseBridges is set, clients always use IPv6, and prefer it
   * by default.)
   *
   * Now choose an address that we can use to connect to the directory server.
   */
  if (directory_choose_address_routerstatus(status,
                                            req->indirection, &use_or_ap,
                                            &use_dir_ap) < 0) {
    return -1;
  }

  directory_request_set_or_addr_port(req, &use_or_ap);
  directory_request_set_dir_addr_port(req, &use_dir_ap);
  directory_request_set_directory_id_digest(req, status->identity_digest);
  return 0;
}

/**
 * Launch the provided directory request, configured in <b>request</b>.
 * After this function is called, you can free <b>request</b>.
 */
MOCK_IMPL(void,
directory_initiate_request,(directory_request_t *request))
{
  tor_assert(request);
  if (request->routerstatus) {
    tor_assert_nonfatal(
                  ! directory_request_dir_contact_info_specified(request));
    if (directory_request_set_dir_from_routerstatus(request) < 0) {
      return;
    }
  }

  const tor_addr_port_t *or_addr_port = &request->or_addr_port;
  const tor_addr_port_t *dir_addr_port = &request->dir_addr_port;
  const char *digest = request->digest;
  const uint8_t dir_purpose = request->dir_purpose;
  const uint8_t router_purpose = request->router_purpose;
  const dir_indirection_t indirection = request->indirection;
  const char *resource = request->resource;
  const rend_data_t *rend_query = request->rend_query;
  const hs_ident_dir_conn_t *hs_ident = request->hs_ident;
  circuit_guard_state_t *guard_state = request->guard_state;

  tor_assert(or_addr_port->port || dir_addr_port->port);
  tor_assert(digest);

  dir_connection_t *conn;
  const or_options_t *options = get_options();
  int socket_error = 0;
  const char *begindir_reason = NULL;
  /* Should the connection be to a relay's OR port (and inside that we will
   * send our directory request)? */
  const int use_begindir =
    directory_command_should_use_begindir(options, request, &begindir_reason);

  /* Will the connection go via a three-hop Tor circuit? Note that this
   * is separate from whether it will use_begindir. */
  const int anonymized_connection = dirind_is_anon(indirection);

  /* What is the address we want to make the directory request to? If
   * we're making a begindir request this is the ORPort of the relay
   * we're contacting; if not a begindir request, this is its DirPort.
   * Note that if anonymized_connection is true, we won't be initiating
   * a connection directly to this address. */
  tor_addr_t addr;
  tor_addr_copy(&addr, &(use_begindir ? or_addr_port : dir_addr_port)->addr);
  uint16_t port = (use_begindir ? or_addr_port : dir_addr_port)->port;

  log_debug(LD_DIR, "anonymized %d, use_begindir %d.",
            anonymized_connection, use_begindir);

  log_debug(LD_DIR, "Initiating %s", dir_conn_purpose_to_string(dir_purpose));

  if (purpose_needs_anonymity(dir_purpose, router_purpose, resource)) {
    tor_assert(anonymized_connection ||
               rend_non_anonymous_mode_enabled(options));
  }

  /* use encrypted begindir connections for everything except relays
   * this provides better protection for directory fetches */
  if (!use_begindir && directory_must_use_begindir(options)) {
    log_warn(LD_BUG, "Client could not use begindir connection: %s",
             begindir_reason ? begindir_reason : "(NULL)");
    return;
  }

  /* ensure that we don't make direct connections when a SOCKS server is
   * configured. */
  if (!anonymized_connection && !use_begindir && !options->HTTPProxy &&
      (options->Socks4Proxy || options->Socks5Proxy)) {
    log_warn(LD_DIR, "Cannot connect to a directory server through a "
             "SOCKS proxy!");
    return;
  }

  /* Make sure that the destination addr and port we picked is viable. */
  if (!port || tor_addr_is_null(&addr)) {
    static int logged_backtrace = 0;
    log_warn(LD_DIR,
             "Cannot make an outgoing %sconnection without a remote %sPort.",
             use_begindir ? "begindir " : "",
             use_begindir ? "OR" : "Dir");
    if (!logged_backtrace) {
      log_backtrace(LOG_INFO, LD_BUG, "Address came from");
      logged_backtrace = 1;
    }
    return;
  }

  conn = dir_connection_new(tor_addr_family(&addr));

  /* set up conn so it's got all the data we need to remember */
  tor_addr_copy(&conn->base_.addr, &addr);
  conn->base_.port = port;
  conn->base_.address = tor_addr_to_str_dup(&addr);
  memcpy(conn->identity_digest, digest, DIGEST_LEN);

  conn->base_.purpose = dir_purpose;
  conn->router_purpose = router_purpose;

  /* give it an initial state */
  conn->base_.state = DIR_CONN_STATE_CONNECTING;

  /* decide whether we can learn our IP address from this conn */
  /* XXXX This is a bad name for this field now. */
  conn->dirconn_direct = !anonymized_connection;

  /* copy rendezvous data, if any */
  if (rend_query) {
    /* We can't have both v2 and v3+ identifier. */
    tor_assert_nonfatal(!hs_ident);
    conn->rend_data = rend_data_dup(rend_query);
  }
  if (hs_ident) {
    /* We can't have both v2 and v3+ identifier. */
    tor_assert_nonfatal(!rend_query);
    conn->hs_ident = hs_ident_dir_conn_dup(hs_ident);
  }

  if (!anonymized_connection && !use_begindir) {
    /* then we want to connect to dirport directly */

    if (options->HTTPProxy) {
      tor_addr_copy(&addr, &options->HTTPProxyAddr);
      port = options->HTTPProxyPort;
    }

    // In this case we should not have picked a directory guard.
    if (BUG(guard_state)) {
      entry_guard_cancel(&guard_state);
    }

    switch (connection_connect(TO_CONN(conn), conn->base_.address, &addr,
                               port, &socket_error)) {
      case -1:
        connection_mark_for_close(TO_CONN(conn));
        return;
      case 1:
        /* start flushing conn */
        conn->base_.state = DIR_CONN_STATE_CLIENT_SENDING;
        /* fall through */
      case 0:
        /* queue the command on the outbuf */
        directory_send_command(conn, 1, request);
        connection_watch_events(TO_CONN(conn), READ_EVENT | WRITE_EVENT);
        /* writable indicates finish, readable indicates broken link,
           error indicates broken link in windowsland. */
    }
  } else {
    /* We will use a Tor circuit (maybe 1-hop, maybe 3-hop, maybe with
     * begindir, maybe not with begindir) */

    entry_connection_t *linked_conn;

    /* Anonymized tunneled connections can never share a circuit.
     * One-hop directory connections can share circuits with each other
     * but nothing else. */
    int iso_flags = anonymized_connection ? ISO_STREAM : ISO_SESSIONGRP;

    /* If it's an anonymized connection, remember the fact that we
     * wanted it for later: maybe we'll want it again soon. */
    if (anonymized_connection && use_begindir)
      rep_hist_note_used_internal(time(NULL), 0, 1);
    else if (anonymized_connection && !use_begindir)
      rep_hist_note_used_port(time(NULL), conn->base_.port);

    // In this case we should not have a directory guard; we'll
    // get a regular guard later when we build the circuit.
    if (BUG(anonymized_connection && guard_state)) {
      entry_guard_cancel(&guard_state);
    }

    conn->guard_state = guard_state;

    /* make an AP connection
     * populate it and add it at the right state
     * hook up both sides
     */
    linked_conn =
      connection_ap_make_link(TO_CONN(conn),
                              conn->base_.address, conn->base_.port,
                              digest,
                              SESSION_GROUP_DIRCONN, iso_flags,
                              use_begindir, !anonymized_connection);
    if (!linked_conn) {
      log_warn(LD_NET,"Making tunnel to dirserver failed.");
      connection_mark_for_close(TO_CONN(conn));
      return;
    }

    if (connection_add(TO_CONN(conn)) < 0) {
      log_warn(LD_NET,"Unable to add connection for link to dirserver.");
      connection_mark_for_close(TO_CONN(conn));
      return;
    }
    conn->base_.state = DIR_CONN_STATE_CLIENT_SENDING;
    /* queue the command on the outbuf */
    directory_send_command(conn, 0, request);

    connection_watch_events(TO_CONN(conn), READ_EVENT|WRITE_EVENT);
    connection_start_reading(ENTRY_TO_CONN(linked_conn));
  }
}

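/* Illustrative usage sketch only (kept out of the build with "#if 0"): how a
 * caller might drive the directory_request_t API above to fetch the plain
 * "ns" consensus from the directory described by a routerstatus. The helper
 * name and the particular purpose/flavor/indirection chosen here are just
 * assumptions for the example. */
#if 0
static void
example_fetch_consensus_from(const routerstatus_t *rs)
{
  directory_request_t *req =
    directory_request_new(DIR_PURPOSE_FETCH_CONSENSUS);
  /* Let the routerstatus supply the address, ports, and identity digest. */
  directory_request_set_routerstatus(req, rs);
  /* "ns" names the regular consensus flavor. */
  directory_request_set_resource(req, "ns");
  /* One-hop begindir-style connection; DIRIND_ANONYMOUS would tunnel it
   * over a three-hop circuit instead. */
  directory_request_set_indirection(req, DIRIND_ONEHOP);
  directory_initiate_request(req);
  /* Once the request has been launched, the request object can be freed. */
  directory_request_free_(req);
}
#endif /* 0 */
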
/** Return true iff anything we say on <b>conn</b> is being encrypted before
 * we send it to the client/server. */
int
connection_dir_is_encrypted(const dir_connection_t *conn)
{
  /* Right now it's sufficient to see if conn is or has been linked, since
   * the only thing it could be linked to is an edge connection on a
   * circuit, and the only way it could have been unlinked is at the edge
   * connection getting closed.
   */
  return TO_CONN(conn)->linked;
}

/** Helper for sorting: compare two strings alphabetically. */
static int
compare_strs_(const void **a, const void **b)
{
  const char *s1 = *a, *s2 = *b;
  return strcmp(s1, s2);
}

#define CONDITIONAL_CONSENSUS_FPR_LEN 3
#if (CONDITIONAL_CONSENSUS_FPR_LEN > DIGEST_LEN)
#error "conditional consensus fingerprint length is larger than digest length"
#endif

/** Return the URL we should use for a consensus download.
 *
 * Use the "conditional consensus downloading" feature described in
 * dir-spec.txt, i.e.
 * GET .../consensus/<b>fpr</b>+<b>fpr</b>+<b>fpr</b>
 *
 * If 'resource' is provided, it is the name of a consensus flavor to request.
 */
static char *
directory_get_consensus_url(const char *resource)
{
  char *url = NULL;
  const char *hyphen, *flavor;
  if (resource==NULL || strcmp(resource, "ns")==0) {
    flavor = ""; /* Request ns consensuses as "", so older servers will work*/
    hyphen = "";
  } else {
    flavor = resource;
    hyphen = "-";
  }

  {
    char *authority_id_list;
    smartlist_t *authority_digests = smartlist_new();

    SMARTLIST_FOREACH_BEGIN(router_get_trusted_dir_servers(),
                            dir_server_t *, ds) {
        char *hex;
        if (!(ds->type & V3_DIRINFO))
          continue;

        hex = tor_malloc(2*CONDITIONAL_CONSENSUS_FPR_LEN+1);
        base16_encode(hex, 2*CONDITIONAL_CONSENSUS_FPR_LEN+1,
                      ds->v3_identity_digest, CONDITIONAL_CONSENSUS_FPR_LEN);
        smartlist_add(authority_digests, hex);
    } SMARTLIST_FOREACH_END(ds);
    smartlist_sort(authority_digests, compare_strs_);
    authority_id_list = smartlist_join_strings(authority_digests,
                                               "+", 0, NULL);

    tor_asprintf(&url, "/tor/status-vote/current/consensus%s%s/%s.z",
                 hyphen, flavor, authority_id_list);

    SMARTLIST_FOREACH(authority_digests, char *, cp, tor_free(cp));
    smartlist_free(authority_digests);
    tor_free(authority_id_list);
  }
  return url;
}

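/* Illustrative sketch only (kept out of the build with "#if 0"; the helper
 * name is made up): for the "microdesc" flavor the function above yields a
 * URL of the form
 *   /tor/status-vote/current/consensus-microdesc/<fp>+<fp>+...+<fp>.z
 * where each <fp> is the first 3 bytes (6 hex digits) of a v3 authority
 * identity digest, sorted and joined with '+'. */
#if 0
static void
example_log_consensus_url(void)
{
  char *url = directory_get_consensus_url("microdesc");
  log_debug(LD_DIR, "Would request the consensus at %s", url);
  tor_free(url);
}
#endif /* 0 */
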
/**
 * Copies the ipv6 address from <b>source</b> to <b>destination</b>, subject
 * to the buffer size limit <b>len</b>. If decorate is true, makes sure the
 * copied address is decorated with square brackets.
 */
static void
copy_ipv6_address(char* destination, const char* source, size_t len,
                  int decorate) {
  tor_assert(destination);
  tor_assert(source);

  if (decorate && source[0] != '[') {
    tor_snprintf(destination, len, "[%s]", source);
  } else {
    strlcpy(destination, source, len);
  }
}

/** Queue an appropriate HTTP command for <b>request</b> on
 * <b>conn</b>-\>outbuf. If <b>direct</b> is true, we're making a
 * non-anonymized connection to the dirport.
 */
static void
directory_send_command(dir_connection_t *conn,
                       const int direct,
                       const directory_request_t *req)
{
  tor_assert(req);
  const int purpose = req->dir_purpose;
  const char *resource = req->resource;
  const char *payload = req->payload;
  const size_t payload_len = req->payload_len;
  const time_t if_modified_since = req->if_modified_since;
  const int anonymized_connection = dirind_is_anon(req->indirection);

  char proxystring[256];
  char hoststring[128];
  /* NEEDS to be the same size as hoststring.
     Will be decorated with brackets around it if it is ipv6. */
  char decorated_address[128];
  smartlist_t *headers = smartlist_new();
  char *url;
  char *accept_encoding;
  size_t url_len;
  char request[8192];
  size_t request_len, total_request_len = 0;
  const char *httpcommand = NULL;

  tor_assert(conn);
  tor_assert(conn->base_.type == CONN_TYPE_DIR);

  tor_free(conn->requested_resource);
  if (resource)
    conn->requested_resource = tor_strdup(resource);

  /* decorate the ip address if it is ipv6 */
  if (strchr(conn->base_.address, ':')) {
    copy_ipv6_address(decorated_address, conn->base_.address,
                      sizeof(decorated_address), 1);
  } else {
    strlcpy(decorated_address, conn->base_.address, sizeof(decorated_address));
  }

  /* come up with a string for which Host: we want */
  if (conn->base_.port == 80) {
    strlcpy(hoststring, decorated_address, sizeof(hoststring));
  } else {
    tor_snprintf(hoststring, sizeof(hoststring), "%s:%d",
                 decorated_address, conn->base_.port);
  }

  /* Format if-modified-since */
  if (if_modified_since) {
    char b[RFC1123_TIME_LEN+1];
    format_rfc1123_time(b, if_modified_since);
    smartlist_add_asprintf(headers, "If-Modified-Since: %s\r\n", b);
  }

  /* come up with some proxy lines, if we're using one. */
  if (direct && get_options()->HTTPProxy) {
    char *base64_authenticator=NULL;
    const char *authenticator = get_options()->HTTPProxyAuthenticator;

    tor_snprintf(proxystring, sizeof(proxystring),"http://%s", hoststring);
    if (authenticator) {
      base64_authenticator = alloc_http_authenticator(authenticator);
      if (!base64_authenticator)
        log_warn(LD_BUG, "Encoding http authenticator failed");
    }
    if (base64_authenticator) {
      smartlist_add_asprintf(headers,
                 "Proxy-Authorization: Basic %s\r\n",
                 base64_authenticator);
      tor_free(base64_authenticator);
    }
  } else {
    proxystring[0] = 0;
  }

  if (! anonymized_connection) {
    /* Add Accept-Encoding. */
    accept_encoding = accept_encoding_header();
    smartlist_add_asprintf(headers, "Accept-Encoding: %s\r\n",
                           accept_encoding);
    tor_free(accept_encoding);
  }

  /* Add additional headers, if any */
  {
    config_line_t *h;
    for (h = req->additional_headers; h; h = h->next) {
      smartlist_add_asprintf(headers, "%s%s\r\n", h->key, h->value);
    }
  }

  switch (purpose) {
    case DIR_PURPOSE_FETCH_CONSENSUS:
      /* resource is optional. If present, it's a flavor name */
      tor_assert(!payload);
      httpcommand = "GET";
      url = directory_get_consensus_url(resource);
      log_info(LD_DIR, "Downloading consensus from %s using %s",
               hoststring, url);
      break;
    case DIR_PURPOSE_FETCH_CERTIFICATE:
      tor_assert(resource);
      tor_assert(!payload);
      httpcommand = "GET";
      tor_asprintf(&url, "/tor/keys/%s", resource);
      break;
    case DIR_PURPOSE_FETCH_STATUS_VOTE:
      tor_assert(resource);
      tor_assert(!payload);
      httpcommand = "GET";
      tor_asprintf(&url, "/tor/status-vote/next/%s.z", resource);
      break;
    case DIR_PURPOSE_FETCH_DETACHED_SIGNATURES:
      tor_assert(!resource);
      tor_assert(!payload);
      httpcommand = "GET";
      url = tor_strdup("/tor/status-vote/next/consensus-signatures.z");
      break;
    case DIR_PURPOSE_FETCH_SERVERDESC:
      tor_assert(resource);
      httpcommand = "GET";
      tor_asprintf(&url, "/tor/server/%s", resource);
      break;
    case DIR_PURPOSE_FETCH_EXTRAINFO:
      tor_assert(resource);
      httpcommand = "GET";
      tor_asprintf(&url, "/tor/extra/%s", resource);
      break;
    case DIR_PURPOSE_FETCH_MICRODESC:
      tor_assert(resource);
      httpcommand = "GET";
      tor_asprintf(&url, "/tor/micro/%s", resource);
      break;
    case DIR_PURPOSE_UPLOAD_DIR: {
      const char *why = router_get_descriptor_gen_reason();
      tor_assert(!resource);
      tor_assert(payload);
      httpcommand = "POST";
      url = tor_strdup("/tor/");
      if (why) {
        smartlist_add_asprintf(headers, "X-Desc-Gen-Reason: %s\r\n", why);
      }
      break;
    }
    case DIR_PURPOSE_UPLOAD_VOTE:
      tor_assert(!resource);
      tor_assert(payload);
      httpcommand = "POST";
      url = tor_strdup("/tor/post/vote");
      break;
    case DIR_PURPOSE_UPLOAD_SIGNATURES:
      tor_assert(!resource);
      tor_assert(payload);
      httpcommand = "POST";
      url = tor_strdup("/tor/post/consensus-signature");
      break;
    case DIR_PURPOSE_FETCH_RENDDESC_V2:
      tor_assert(resource);
      tor_assert(strlen(resource) <= REND_DESC_ID_V2_LEN_BASE32);
      tor_assert(!payload);
      httpcommand = "GET";
      tor_asprintf(&url, "/tor/rendezvous2/%s", resource);
      break;
    case DIR_PURPOSE_FETCH_HSDESC:
      tor_assert(resource);
      tor_assert(strlen(resource) <= ED25519_BASE64_LEN);
      tor_assert(!payload);
      httpcommand = "GET";
      tor_asprintf(&url, "/tor/hs/3/%s", resource);
      break;
    case DIR_PURPOSE_UPLOAD_RENDDESC_V2:
      tor_assert(!resource);
      tor_assert(payload);
      httpcommand = "POST";
      url = tor_strdup("/tor/rendezvous2/publish");
      break;
    case DIR_PURPOSE_UPLOAD_HSDESC:
      tor_assert(resource);
      tor_assert(payload);
      httpcommand = "POST";
      tor_asprintf(&url, "/tor/hs/%s/publish", resource);
      break;
    default:
      tor_assert(0);
      return;
  }

  /* warn in the non-tunneled case */
  if (direct && (strlen(proxystring) + strlen(url) >= 4096)) {
    log_warn(LD_BUG,
             "Squid does not like URLs longer than 4095 bytes, and this "
             "one is %d bytes long: %s%s",
             (int)(strlen(proxystring) + strlen(url)), proxystring, url);
  }

  tor_snprintf(request, sizeof(request), "%s %s", httpcommand, proxystring);

  request_len = strlen(request);
  total_request_len += request_len;
  connection_buf_add(request, request_len, TO_CONN(conn));

  url_len = strlen(url);
  total_request_len += url_len;
  connection_buf_add(url, url_len, TO_CONN(conn));
  tor_free(url);

  if (!strcmp(httpcommand, "POST") || payload) {
    smartlist_add_asprintf(headers, "Content-Length: %lu\r\n",
                           payload ? (unsigned long)payload_len : 0);
  }

  {
    char *header = smartlist_join_strings(headers, "", 0, NULL);
    tor_snprintf(request, sizeof(request), " HTTP/1.0\r\nHost: %s\r\n%s\r\n",
                 hoststring, header);
    tor_free(header);
  }

  request_len = strlen(request);
  total_request_len += request_len;
  connection_buf_add(request, request_len, TO_CONN(conn));

  if (payload) {
    /* then send the payload afterwards too */
    connection_buf_add(payload, payload_len, TO_CONN(conn));
    total_request_len += payload_len;
  }

  SMARTLIST_FOREACH(headers, char *, h, tor_free(h));
  smartlist_free(headers);

  log_debug(LD_DIR,
            "Sent request to directory server '%s:%d': "
            "(purpose: %d, request size: " U64_FORMAT ", "
            "payload size: " U64_FORMAT ")",
            conn->base_.address, conn->base_.port,
            conn->base_.purpose,
            U64_PRINTF_ARG(total_request_len),
            U64_PRINTF_ARG(payload ? payload_len : 0));
}

/** Parse an HTTP request string <b>headers</b> of the form
 * \verbatim
 * "\%s [http[s]://]\%s HTTP/1..."
 * \endverbatim
 * If it's well-formed, strdup the second \%s into *<b>url</b>, and
 * nul-terminate it. If the url doesn't start with "/tor/", rewrite it
 * so it does. Return 0.
 * Otherwise, return -1.
 */
STATIC int
parse_http_url(const char *headers, char **url)
{
  char *command = NULL;
  if (parse_http_command(headers, &command, url) < 0) {
    return -1;
  }
  if (strcmpstart(*url, "/tor/")) {
    char *new_url = NULL;
    tor_asprintf(&new_url, "/tor%s%s",
                 *url[0] == '/' ? "" : "/",
                 *url);
    tor_free(*url);
    *url = new_url;
  }
  tor_free(command);
  return 0;
}

/** Parse an HTTP request line at the start of a headers string. On failure,
 * return -1. On success, set *<b>command_out</b> to a copy of the HTTP
 * command ("get", "post", etc), set *<b>url_out</b> to a copy of the URL, and
 * return 0. */
int
parse_http_command(const char *headers, char **command_out, char **url_out)
{
  const char *command, *end_of_command;
  char *s, *start, *tmp;

  s = (char *)eat_whitespace_no_nl(headers);
  if (!*s) return -1;
  command = s;
  s = (char *)find_whitespace(s); /* get past GET/POST */
  if (!*s) return -1;
  end_of_command = s;
  s = (char *)eat_whitespace_no_nl(s);
  if (!*s) return -1;
  start = s; /* this is the URL, assuming it's valid */
  s = (char *)find_whitespace(start);
  if (!*s) return -1;

  /* tolerate the http[s] proxy style of putting the hostname in the url */
  if (s-start >= 4 && !strcmpstart(start,"http")) {
    tmp = start + 4;
    if (*tmp == 's')
      tmp++;
    if (s-tmp >= 3 && !strcmpstart(tmp,"://")) {
      tmp = strchr(tmp+3, '/');
      if (tmp && tmp < s) {
        log_debug(LD_DIR,"Skipping over 'http[s]://hostname/' string");
        start = tmp;
      }
    }
  }

  /* Check if the header is well formed (next sequence
   * should be HTTP/1.X\r\n). Assumes we're supporting 1.0? */
  {
    unsigned minor_ver;
    char ch;
    char *e = (char *)eat_whitespace_no_nl(s);
    if (2 != tor_sscanf(e, "HTTP/1.%u%c", &minor_ver, &ch)) {
      return -1;
    }
    if (ch != '\r')
      return -1;
  }

  *url_out = tor_memdup_nulterm(start, s-start);
  *command_out = tor_memdup_nulterm(command, end_of_command - command);
  return 0;
}

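/* Illustrative usage sketch only (kept out of the build with "#if 0"); the
 * helper name and the sample request line are made up for the example. */
#if 0
static void
example_parse_request_line(void)
{
  char *command = NULL, *url = NULL;
  const char *headers = "GET /tor/server/all HTTP/1.0\r\n\r\n";
  if (parse_http_command(headers, &command, &url) == 0) {
    /* Here command is "GET" and url is "/tor/server/all"; the caller owns
     * both copies. */
    tor_free(command);
    tor_free(url);
  }
}
#endif /* 0 */
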
/** Return a copy of the first HTTP header in <b>headers</b> whose key is
 * <b>which</b>. The key should be given with a terminating colon and space;
 * this function copies everything after, up to but not including the
 * following \\r\\n. */
char *
http_get_header(const char *headers, const char *which)
{
  const char *cp = headers;
  while (cp) {
    if (!strcasecmpstart(cp, which)) {
      char *eos;
      cp += strlen(which);
      if ((eos = strchr(cp,'\r')))
        return tor_strndup(cp, eos-cp);
      else
        return tor_strdup(cp);
    }
    cp = strchr(cp, '\n');
    if (cp)
      ++cp;
  }
  return NULL;
}

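/* Illustrative usage sketch only (kept out of the build with "#if 0"): keys
 * are matched case-insensitively and must include the trailing colon and
 * space, and the caller owns the returned copy. The helper name is made up
 * for the example. */
#if 0
static void
example_read_content_length(const char *headers)
{
  char *clen = http_get_header(headers, "Content-Length: ");
  if (clen) {
    log_debug(LD_DIR, "Peer sent Content-Length: %s", escaped(clen));
    tor_free(clen);
  }
}
#endif /* 0 */
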
2006-07-04 20:18:08 +02:00
|
|
|
/** If <b>headers</b> indicates that a proxy was involved, then rewrite
|
2006-07-17 08:35:06 +02:00
|
|
|
* <b>conn</b>-\>address to describe our best guess of the address that
|
|
|
|
* originated this HTTP request. */
|
2006-07-04 20:18:08 +02:00
|
|
|
static void
|
|
|
|
http_set_address_origin(const char *headers, connection_t *conn)
|
2005-08-24 00:27:17 +02:00
|
|
|
{
|
|
|
|
char *fwd;
|
|
|
|
|
|
|
|
fwd = http_get_header(headers, "Forwarded-For: ");
|
|
|
|
if (!fwd)
|
|
|
|
fwd = http_get_header(headers, "X-Forwarded-For: ");
|
|
|
|
if (fwd) {
|
2013-11-16 17:29:54 +01:00
|
|
|
tor_addr_t toraddr;
|
2014-04-02 03:10:14 +02:00
|
|
|
if (tor_addr_parse(&toraddr,fwd) == -1 ||
|
|
|
|
tor_addr_is_internal(&toraddr,0)) {
|
2013-11-20 20:49:17 +01:00
|
|
|
log_debug(LD_DIR, "Ignoring local/internal IP %s", escaped(fwd));
|
2007-02-07 04:40:06 +01:00
|
|
|
tor_free(fwd);
|
|
|
|
return;
|
|
|
|
}
|
2013-11-16 17:29:54 +01:00
|
|
|
|
2006-07-04 20:18:08 +02:00
|
|
|
tor_free(conn->address);
|
2007-02-07 04:40:06 +01:00
|
|
|
conn->address = tor_strdup(fwd);
|
2006-07-17 08:35:06 +02:00
|
|
|
tor_free(fwd);
|
2005-08-24 00:27:17 +02:00
|
|
|
}
|
|
|
|
}
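/* For example (addresses are illustrative): with headers containing
 * "X-Forwarded-For: 203.0.113.5\r\n", the code above replaces conn->address
 * with "203.0.113.5".  A "Forwarded-For: " header takes precedence, and a
 * private value such as "10.0.0.1" is discarded because
 * tor_addr_is_internal() rejects it. */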
|
|
|
|
|
2004-05-10 06:34:48 +02:00
|
|
|
/** Parse an HTTP response string <b>headers</b> of the form
|
2005-05-17 19:01:36 +02:00
|
|
|
* \verbatim
|
2004-05-10 06:34:48 +02:00
|
|
|
* "HTTP/1.\%d \%d\%s\r\n...".
|
2005-05-17 19:01:36 +02:00
|
|
|
* \endverbatim
|
2005-03-22 19:43:24 +01:00
|
|
|
*
|
|
|
|
* If it's well-formed, assign the status code to *<b>code</b> and
|
|
|
|
* return 0. Otherwise, return -1.
|
|
|
|
*
|
|
|
|
* On success: If <b>date</b> is provided, set *date to the Date
|
|
|
|
* header in the http headers, or 0 if no such header is found. If
|
|
|
|
* <b>compression</b> is provided, set *<b>compression</b> to the
|
|
|
|
* compression method given in the Content-Encoding header, or 0 if no
|
|
|
|
* such header is found, or -1 if the value of the header is not
|
|
|
|
* recognized. If <b>reason</b> is provided, strdup the reason string
|
|
|
|
* into it.
|
2004-03-12 13:43:13 +01:00
|
|
|
*/
|
2005-02-24 11:56:55 +01:00
|
|
|
int
|
2004-11-12 17:39:03 +01:00
|
|
|
parse_http_response(const char *headers, int *code, time_t *date,
|
2006-10-09 05:39:06 +02:00
|
|
|
compress_method_t *compression, char **reason)
|
2004-05-13 01:48:57 +02:00
|
|
|
{
|
2009-03-03 19:02:36 +01:00
|
|
|
unsigned n1, n2;
|
2004-08-15 22:30:15 +02:00
|
|
|
char datestr[RFC1123_TIME_LEN+1];
|
2004-09-08 08:52:33 +02:00
|
|
|
smartlist_t *parsed_headers;
|
2004-10-17 00:14:52 +02:00
|
|
|
tor_assert(headers);
|
|
|
|
tor_assert(code);
|
2004-03-12 13:43:13 +01:00
|
|
|
|
2004-12-08 01:42:50 +01:00
|
|
|
while (TOR_ISSPACE(*headers)) headers++; /* tolerate leading whitespace */
|
2004-03-12 13:43:13 +01:00
|
|
|
|
2009-03-03 19:02:36 +01:00
|
|
|
if (tor_sscanf(headers, "HTTP/1.%u %u", &n1, &n2) < 2 ||
|
2004-11-28 12:39:53 +01:00
|
|
|
(n1 != 0 && n1 != 1) ||
|
|
|
|
(n2 < 100 || n2 >= 600)) {
|
2006-03-05 10:50:26 +01:00
|
|
|
log_warn(LD_HTTP,"Failed to parse header %s",escaped(headers));
|
2004-03-12 13:43:13 +01:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
*code = n2;
|
2004-11-12 17:39:03 +01:00
|
|
|
|
2012-01-18 21:53:30 +01:00
|
|
|
parsed_headers = smartlist_new();
|
2004-09-08 08:52:33 +02:00
|
|
|
smartlist_split_string(parsed_headers, headers, "\n",
|
|
|
|
SPLIT_SKIP_SPACE|SPLIT_IGNORE_BLANK, -1);
|
2005-03-22 19:43:24 +01:00
|
|
|
if (reason) {
|
2012-01-18 21:53:30 +01:00
|
|
|
smartlist_t *status_line_elements = smartlist_new();
|
2005-03-22 19:43:24 +01:00
|
|
|
tor_assert(smartlist_len(parsed_headers));
|
|
|
|
smartlist_split_string(status_line_elements,
|
|
|
|
smartlist_get(parsed_headers, 0),
|
|
|
|
" ", SPLIT_SKIP_SPACE|SPLIT_IGNORE_BLANK, 3);
|
|
|
|
tor_assert(smartlist_len(status_line_elements) <= 3);
|
|
|
|
if (smartlist_len(status_line_elements) == 3) {
|
|
|
|
*reason = smartlist_get(status_line_elements, 2);
|
|
|
|
smartlist_set(status_line_elements, 2, NULL); /* Prevent free */
|
|
|
|
}
|
|
|
|
SMARTLIST_FOREACH(status_line_elements, char *, cp, tor_free(cp));
|
|
|
|
smartlist_free(status_line_elements);
|
|
|
|
}
|
2004-08-15 22:30:15 +02:00
|
|
|
if (date) {
|
|
|
|
*date = 0;
|
2004-09-08 08:52:33 +02:00
|
|
|
SMARTLIST_FOREACH(parsed_headers, const char *, s,
|
|
|
|
if (!strcmpstart(s, "Date: ")) {
|
|
|
|
strlcpy(datestr, s+6, sizeof(datestr));
|
2004-08-15 22:30:15 +02:00
|
|
|
/* This will do nothing on failure, so we don't need to check
|
|
|
|
the result. We shouldn't warn, since there are many other valid
|
|
|
|
date formats besides the one we use. */
|
|
|
|
parse_rfc1123_time(datestr, date);
|
|
|
|
break;
|
2004-09-08 08:52:33 +02:00
|
|
|
});
|
|
|
|
}
|
|
|
|
if (compression) {
|
|
|
|
const char *enc = NULL;
|
|
|
|
SMARTLIST_FOREACH(parsed_headers, const char *, s,
|
|
|
|
if (!strcmpstart(s, "Content-Encoding: ")) {
|
2004-10-01 06:45:14 +02:00
|
|
|
enc = s+18; break;
|
2004-09-08 08:52:33 +02:00
|
|
|
});
|
2017-05-12 12:21:49 +02:00
|
|
|
|
|
|
|
if (enc == NULL)
|
2006-10-09 05:39:06 +02:00
|
|
|
*compression = NO_METHOD;
|
2017-05-12 12:21:49 +02:00
|
|
|
else {
|
|
|
|
*compression = compression_method_get_by_name(enc);
|
|
|
|
|
|
|
|
if (*compression == UNKNOWN_METHOD)
|
|
|
|
log_info(LD_HTTP, "Unrecognized content encoding: %s. Trying to deal.",
|
|
|
|
escaped(enc));
|
2004-08-15 22:30:15 +02:00
|
|
|
}
|
|
|
|
}
|
2004-09-08 08:52:33 +02:00
|
|
|
SMARTLIST_FOREACH(parsed_headers, char *, s, tor_free(s));
|
|
|
|
smartlist_free(parsed_headers);
|
2004-08-15 22:30:15 +02:00
|
|
|
|
2004-03-12 13:43:13 +01:00
|
|
|
return 0;
|
|
|
|
}
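/* Illustrative sketch of how parse_http_response() is consumed.  The header
 * string is hypothetical; on this input we would expect code == 200,
 * reason == "OK", a parsed Date, and (assuming "deflate" names the zlib
 * method, as in the compression code) *compression set accordingly. */
#if 0
{
  int code = 0;
  time_t date = 0;
  compress_method_t method = NO_METHOD;
  char *reason = NULL;
  const char *hdrs =
    "HTTP/1.0 200 OK\r\n"
    "Date: Mon, 01 Jan 2018 00:00:00 GMT\r\n"
    "Content-Encoding: deflate\r\n";
  if (parse_http_response(hdrs, &code, &date, &method, &reason) == 0) {
    /* Use code/date/method/reason here. */
  }
  tor_free(reason);
}
#endif /* illustrative sketch */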
|
|
|
|
|
2005-01-19 23:40:33 +01:00
|
|
|
/** Return true iff <b>body</b> doesn't start with a plausible router or
|
2014-03-17 17:38:22 +01:00
|
|
|
* network-status or microdescriptor opening. This is a sign of possible
|
|
|
|
* compression. */
|
2005-01-19 23:40:33 +01:00
|
|
|
static int
|
2005-01-19 23:47:48 +01:00
|
|
|
body_is_plausible(const char *body, size_t len, int purpose)
|
2005-01-19 23:40:33 +01:00
|
|
|
{
|
|
|
|
int i;
|
2005-01-21 04:18:49 +01:00
|
|
|
if (len == 0)
|
|
|
|
return 1; /* empty bodies don't need decompression */
|
2005-01-19 23:40:33 +01:00
|
|
|
if (len < 32)
|
|
|
|
return 0;
|
2010-05-11 23:20:33 +02:00
|
|
|
if (purpose == DIR_PURPOSE_FETCH_MICRODESC) {
|
|
|
|
return (!strcmpstart(body,"onion-key"));
|
|
|
|
}
|
2017-01-02 18:16:57 +01:00
|
|
|
|
|
|
|
if (!strcmpstart(body,"router") ||
|
|
|
|
!strcmpstart(body,"network-status"))
|
|
|
|
return 1;
|
|
|
|
for (i=0;i<32;++i) {
|
|
|
|
if (!TOR_ISPRINT(body[i]) && !TOR_ISSPACE(body[i]))
|
|
|
|
return 0;
|
2005-01-19 23:40:33 +01:00
|
|
|
}
|
2017-01-02 18:16:57 +01:00
|
|
|
|
2014-02-12 14:36:08 +01:00
|
|
|
return 1;
|
2005-01-19 23:40:33 +01:00
|
|
|
}
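/* For example, a zlib-compressed body usually begins with 0x78 0x9c (or
 * 0x78 0xda); the second byte is neither printable nor whitespace, so the
 * checks above return 0 and the caller treats the body as (probably)
 * compressed.  A body starting with "router " passes immediately, as does
 * one starting with "onion-key" for microdescriptor fetches. */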
|
|
|
|
|
2007-10-04 18:21:58 +02:00
|
|
|
/** Called when we've just fetched a bunch of router descriptors in
|
|
|
|
* <b>body</b>. The list <b>which</b>, if present, holds digests for
|
|
|
|
* descriptors we requested: descriptor digests if <b>descriptor_digests</b>
|
|
|
|
* is true, or identity digests otherwise. Parse the descriptors, validate
|
|
|
|
* them, and annotate them as having purpose <b>purpose</b> and as having been
|
2008-12-23 22:17:52 +01:00
|
|
|
* downloaded from <b>source</b>.
|
|
|
|
*
|
|
|
|
* Return the number of routers actually added. */
|
|
|
|
static int
|
2007-09-27 22:46:30 +02:00
|
|
|
load_downloaded_routers(const char *body, smartlist_t *which,
|
|
|
|
int descriptor_digests,
|
|
|
|
int router_purpose,
|
|
|
|
const char *source)
|
|
|
|
{
|
|
|
|
char buf[256];
|
|
|
|
char time_buf[ISO_TIME_LEN+1];
|
2008-12-23 22:17:52 +01:00
|
|
|
int added = 0;
|
2007-09-27 22:46:30 +02:00
|
|
|
int general = router_purpose == ROUTER_PURPOSE_GENERAL;
|
|
|
|
format_iso_time(time_buf, time(NULL));
|
2007-10-04 18:21:58 +02:00
|
|
|
tor_assert(source);
|
2007-09-27 22:46:30 +02:00
|
|
|
|
|
|
|
if (tor_snprintf(buf, sizeof(buf),
|
|
|
|
"@downloaded-at %s\n"
|
|
|
|
"@source %s\n"
|
|
|
|
"%s%s%s", time_buf, escaped(source),
|
|
|
|
!general ? "@purpose " : "",
|
|
|
|
!general ? router_purpose_to_string(router_purpose) : "",
|
|
|
|
!general ? "\n" : "")<0)
|
2008-12-23 22:17:52 +01:00
|
|
|
return added;
|
2007-09-27 22:46:30 +02:00
|
|
|
|
2008-12-23 22:17:52 +01:00
|
|
|
added = router_load_routers_from_string(body, NULL, SAVED_NOWHERE, which,
|
2007-09-27 22:46:30 +02:00
|
|
|
descriptor_digests, buf);
|
2016-02-22 09:02:01 +01:00
|
|
|
if (added && general)
|
2013-10-08 17:50:53 +02:00
|
|
|
control_event_bootstrap(BOOTSTRAP_STATUS_LOADING_DESCRIPTORS,
|
|
|
|
count_loading_descriptors_progress());
|
2008-12-23 22:17:52 +01:00
|
|
|
return added;
|
2007-09-27 22:46:30 +02:00
|
|
|
}
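/* For reference, the annotation block built above looks roughly like this
 * for a bridge descriptor fetched from 192.0.2.1 (time and address are
 * illustrative; escaped() supplies the quotes):
 *
 *   @downloaded-at 2017-01-01 00:00:00
 *   @source "192.0.2.1"
 *   @purpose bridge
 *
 * General-purpose descriptors omit the @purpose line. */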
|
|
|
|
|
2017-05-02 19:06:25 +02:00
|
|
|
static int handle_response_fetch_certificate(dir_connection_t *,
|
|
|
|
const response_handler_args_t *);
|
|
|
|
static int handle_response_fetch_status_vote(dir_connection_t *,
|
|
|
|
const response_handler_args_t *);
|
|
|
|
static int handle_response_fetch_detached_signatures(dir_connection_t *,
|
|
|
|
const response_handler_args_t *);
|
|
|
|
static int handle_response_fetch_desc(dir_connection_t *,
|
|
|
|
const response_handler_args_t *);
|
|
|
|
static int handle_response_upload_dir(dir_connection_t *,
|
|
|
|
const response_handler_args_t *);
|
|
|
|
static int handle_response_upload_vote(dir_connection_t *,
|
|
|
|
const response_handler_args_t *);
|
|
|
|
static int handle_response_upload_signatures(dir_connection_t *,
|
|
|
|
const response_handler_args_t *);
|
|
|
|
static int handle_response_fetch_renddesc_v2(dir_connection_t *,
|
|
|
|
const response_handler_args_t *);
|
|
|
|
static int handle_response_upload_renddesc_v2(dir_connection_t *,
|
|
|
|
const response_handler_args_t *);
|
2017-04-19 20:36:53 +02:00
|
|
|
static int handle_response_upload_hsdesc(dir_connection_t *,
|
|
|
|
const response_handler_args_t *);
|
2017-05-02 19:06:25 +02:00
|
|
|
|
2017-06-20 17:43:37 +02:00
|
|
|
static int
|
|
|
|
dir_client_decompress_response_body(char **bodyp, size_t *bodylenp,
|
|
|
|
dir_connection_t *conn,
|
|
|
|
compress_method_t compression,
|
|
|
|
int anonymized_connection)
|
|
|
|
{
|
|
|
|
int rv = 0;
|
|
|
|
const char *body = *bodyp;
|
|
|
|
size_t body_len = *bodylenp;
|
|
|
|
int allow_partial = (conn->base_.purpose == DIR_PURPOSE_FETCH_SERVERDESC ||
|
|
|
|
conn->base_.purpose == DIR_PURPOSE_FETCH_EXTRAINFO ||
|
|
|
|
conn->base_.purpose == DIR_PURPOSE_FETCH_MICRODESC);
|
|
|
|
|
|
|
|
int plausible = body_is_plausible(body, body_len, conn->base_.purpose);
|
|
|
|
|
2017-06-20 17:46:54 +02:00
|
|
|
if (plausible && compression == NO_METHOD) {
|
|
|
|
return 0;
|
|
|
|
}
|
2017-06-20 17:43:37 +02:00
|
|
|
|
2017-06-20 17:46:54 +02:00
|
|
|
int severity = LOG_DEBUG;
|
|
|
|
char *new_body = NULL;
|
|
|
|
size_t new_len = 0;
|
|
|
|
const char *description1, *description2;
|
|
|
|
int want_to_try_both = 0;
|
|
|
|
int tried_both = 0;
|
|
|
|
compress_method_t guessed = detect_compression_method(body, body_len);
|
|
|
|
|
|
|
|
description1 = compression_method_get_human_name(compression);
|
|
|
|
|
|
|
|
if (BUG(description1 == NULL))
|
|
|
|
description1 = compression_method_get_human_name(UNKNOWN_METHOD);
|
|
|
|
|
|
|
|
if (guessed == UNKNOWN_METHOD && !plausible)
|
|
|
|
description2 = "confusing binary junk";
|
|
|
|
else
|
|
|
|
description2 = compression_method_get_human_name(guessed);
|
|
|
|
|
|
|
|
/* Tell the user if we don't believe what we're told about compression. */
|
|
|
|
want_to_try_both = (compression == UNKNOWN_METHOD ||
|
|
|
|
guessed != compression);
|
|
|
|
if (want_to_try_both) {
|
2017-06-20 17:48:15 +02:00
|
|
|
severity = LOG_PROTOCOL_WARN;
|
2017-06-20 17:46:54 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
tor_log(severity, LD_HTTP,
|
|
|
|
"HTTP body from server '%s:%d' was labeled as %s, "
|
|
|
|
"%s it seems to be %s.%s",
|
|
|
|
conn->base_.address, conn->base_.port, description1,
|
|
|
|
guessed != compression?"but":"and",
|
|
|
|
description2,
|
|
|
|
(compression>0 && guessed>0 && want_to_try_both)?
|
|
|
|
" Trying both.":"");
|
|
|
|
|
|
|
|
/* Try declared compression first if we can.
|
|
|
|
* tor_compress_supports_method() also returns true for NO_METHOD.
|
|
|
|
* Ensure that the server is not sending us data compressed using a
|
|
|
|
* compression method that is not allowed for anonymous connections. */
|
|
|
|
if (anonymized_connection &&
|
|
|
|
! allowed_anonymous_connection_compression_method(compression)) {
|
|
|
|
warn_disallowed_anonymous_compression_method(compression);
|
|
|
|
rv = -1;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
2017-06-20 17:55:18 +02:00
|
|
|
if (tor_compress_supports_method(compression)) {
|
2017-06-20 17:46:54 +02:00
|
|
|
tor_uncompress(&new_body, &new_len, body, body_len, compression,
|
|
|
|
!allow_partial, LOG_PROTOCOL_WARN);
|
2017-06-20 17:55:18 +02:00
|
|
|
if (new_body) {
|
|
|
|
/* We succeeded with the declared compression method. Great! */
|
|
|
|
rv = 0;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
}
|
2017-06-20 17:46:54 +02:00
|
|
|
|
|
|
|
/* Okay, if that didn't work, and we think that it was compressed
|
|
|
|
* differently, try that. */
|
|
|
|
if (anonymized_connection &&
|
|
|
|
! allowed_anonymous_connection_compression_method(guessed)) {
|
|
|
|
warn_disallowed_anonymous_compression_method(guessed);
|
|
|
|
rv = -1;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
2017-06-20 17:55:18 +02:00
|
|
|
if (tor_compress_supports_method(guessed) &&
|
2017-06-20 17:46:54 +02:00
|
|
|
compression != guessed) {
|
|
|
|
tor_uncompress(&new_body, &new_len, body, body_len, guessed,
|
2017-06-20 17:49:54 +02:00
|
|
|
!allow_partial, LOG_INFO);
|
2017-06-20 17:46:54 +02:00
|
|
|
tried_both = 1;
|
|
|
|
}
|
|
|
|
/* If we're pretty sure that we have a compressed directory, and
|
|
|
|
* we didn't manage to uncompress it, then warn and bail. */
|
|
|
|
if (!plausible && !new_body) {
|
|
|
|
log_fn(LOG_PROTOCOL_WARN, LD_HTTP,
|
|
|
|
"Unable to decompress HTTP body (tried %s%s%s, server '%s:%d').",
|
|
|
|
description1,
|
|
|
|
tried_both?" and ":"",
|
|
|
|
tried_both?description2:"",
|
|
|
|
conn->base_.address, conn->base_.port);
|
|
|
|
rv = -1;
|
|
|
|
goto done;
|
|
|
|
}
|
2017-06-20 17:55:18 +02:00
|
|
|
|
|
|
|
done:
|
2017-06-20 17:46:54 +02:00
|
|
|
if (new_body) {
|
2017-06-20 17:55:18 +02:00
|
|
|
if (rv == 0) {
|
|
|
|
/* success! */
|
|
|
|
tor_free(*bodyp);
|
|
|
|
*bodyp = new_body;
|
|
|
|
*bodylenp = new_len;
|
|
|
|
} else {
|
|
|
|
tor_free(new_body);
|
|
|
|
}
|
2017-06-20 17:43:37 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return rv;
|
|
|
|
}
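/* Minimal sketch (assumptions flagged) of the "declared method first, then
 * guessed method" fallback used above.  "declared", "guessed", "in" and
 * "in_len" are assumed to be supplied by the caller; the final argument of
 * tor_uncompress() is the log severity for protocol complaints. */
#if 0
{
  char *out = NULL;
  size_t out_len = 0;
  if (tor_compress_supports_method(declared))
    tor_uncompress(&out, &out_len, in, in_len, declared,
                   1 /* require a complete stream */, LOG_PROTOCOL_WARN);
  if (!out && tor_compress_supports_method(guessed) && guessed != declared)
    tor_uncompress(&out, &out_len, in, in_len, guessed, 1, LOG_INFO);
  /* out != NULL here means one of the two methods worked. */
}
#endif /* illustrative sketch */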
|
|
|
|
|
2004-07-20 04:44:26 +02:00
|
|
|
/** We are a client, and we've finished reading the server's
|
2007-11-26 03:18:57 +01:00
|
|
|
* response. Parse it and act appropriately.
|
2004-07-20 04:44:26 +02:00
|
|
|
*
|
2005-12-15 22:15:16 +01:00
|
|
|
* If we're still happy with using this directory server in the future, return
|
|
|
|
* 0. Otherwise return -1, and the caller should consider trying the request
|
|
|
|
* again.
|
2005-09-12 09:36:26 +02:00
|
|
|
*
|
|
|
|
* The caller will take care of marking the connection for close.
|
2004-05-05 04:50:38 +02:00
|
|
|
*/
|
2004-07-20 04:44:26 +02:00
|
|
|
static int
|
2006-07-26 21:07:26 +02:00
|
|
|
connection_dir_client_reached_eof(dir_connection_t *conn)
|
2004-07-20 04:44:26 +02:00
|
|
|
{
|
2017-05-22 16:42:18 +02:00
|
|
|
char *body = NULL;
|
|
|
|
char *headers = NULL;
|
2005-03-22 19:43:24 +01:00
|
|
|
char *reason = NULL;
|
2016-05-11 21:31:48 +02:00
|
|
|
size_t body_len = 0;
|
2004-03-12 13:43:13 +01:00
|
|
|
int status_code;
|
2013-10-09 17:13:06 +02:00
|
|
|
time_t date_header = 0;
|
2015-12-08 20:32:29 +01:00
|
|
|
long apparent_skew;
|
2006-10-09 05:39:06 +02:00
|
|
|
compress_method_t compression;
|
2013-10-09 17:13:06 +02:00
|
|
|
int skewed = 0;
|
2017-05-22 16:42:18 +02:00
|
|
|
int rv;
|
2012-10-12 18:22:13 +02:00
|
|
|
int allow_partial = (conn->base_.purpose == DIR_PURPOSE_FETCH_SERVERDESC ||
|
|
|
|
conn->base_.purpose == DIR_PURPOSE_FETCH_EXTRAINFO ||
|
|
|
|
conn->base_.purpose == DIR_PURPOSE_FETCH_MICRODESC);
|
2017-02-07 15:34:49 +01:00
|
|
|
size_t received_bytes;
|
2017-05-22 16:45:12 +02:00
|
|
|
const int anonymized_connection =
|
|
|
|
purpose_needs_anonymity(conn->base_.purpose,
|
|
|
|
conn->router_purpose,
|
|
|
|
conn->requested_resource);
|
2017-02-07 15:34:49 +01:00
|
|
|
|
|
|
|
received_bytes = connection_get_inbuf_len(TO_CONN(conn));
|
2002-09-26 14:09:10 +02:00
|
|
|
|
2009-08-09 20:40:28 +02:00
|
|
|
switch (connection_fetch_from_buf_http(TO_CONN(conn),
|
2004-11-28 12:39:53 +01:00
|
|
|
&headers, MAX_HEADERS_SIZE,
|
2007-02-16 21:00:50 +01:00
|
|
|
&body, &body_len, MAX_DIR_DL_SIZE,
|
2005-10-14 04:26:13 +02:00
|
|
|
allow_partial)) {
|
2004-07-20 04:44:26 +02:00
|
|
|
case -1: /* overflow */
|
2006-02-13 10:37:53 +01:00
|
|
|
log_warn(LD_PROTOCOL,
|
|
|
|
"'fetch' response too large (server '%s:%d'). Closing.",
|
2012-10-12 18:22:13 +02:00
|
|
|
conn->base_.address, conn->base_.port);
|
2004-07-20 04:44:26 +02:00
|
|
|
return -1;
|
|
|
|
case 0:
|
2006-02-13 10:37:53 +01:00
|
|
|
log_info(LD_HTTP,
|
|
|
|
"'fetch' response not all here, but we're at eof. Closing.");
|
2004-07-20 04:44:26 +02:00
|
|
|
return -1;
|
|
|
|
/* case 1, fall through */
|
|
|
|
}
|
2002-09-26 14:09:10 +02:00
|
|
|
|
2004-11-28 10:05:49 +01:00
|
|
|
if (parse_http_response(headers, &status_code, &date_header,
|
2005-03-22 19:43:24 +01:00
|
|
|
&compression, &reason) < 0) {
|
2006-02-13 10:37:53 +01:00
|
|
|
log_warn(LD_HTTP,"Unparseable headers (server '%s:%d'). Closing.",
|
2012-10-12 18:22:13 +02:00
|
|
|
conn->base_.address, conn->base_.port);
|
2017-05-22 16:42:18 +02:00
|
|
|
|
|
|
|
rv = -1;
|
|
|
|
goto done;
|
2004-07-20 04:44:26 +02:00
|
|
|
}
|
2005-03-22 19:43:24 +01:00
|
|
|
if (!reason) reason = tor_strdup("[no reason given]");
|
|
|
|
|
2017-02-27 16:58:19 +01:00
|
|
|
tor_log(LOG_DEBUG, LD_DIR,
|
2011-11-24 17:40:10 +01:00
|
|
|
"Received response from directory server '%s:%d': %d %s "
|
2017-02-13 17:57:21 +01:00
|
|
|
"(purpose: %d, response size: " U64_FORMAT
|
|
|
|
#ifdef MEASUREMENTS_21206
|
|
|
|
", data cells received: %d, data cells sent: %d"
|
|
|
|
#endif
|
|
|
|
", compression: %d)",
|
2012-10-12 18:22:13 +02:00
|
|
|
conn->base_.address, conn->base_.port, status_code,
|
2017-02-13 17:57:21 +01:00
|
|
|
escaped(reason), conn->base_.purpose,
|
2017-02-07 15:34:49 +01:00
|
|
|
U64_PRINTF_ARG(received_bytes),
|
2017-02-13 17:57:21 +01:00
|
|
|
#ifdef MEASUREMENTS_21206
|
|
|
|
conn->data_cells_received, conn->data_cells_sent,
|
|
|
|
#endif
|
2017-02-07 15:34:49 +01:00
|
|
|
compression);
|
2005-03-22 19:43:24 +01:00
|
|
|
|
2017-01-02 15:56:06 +01:00
|
|
|
if (conn->guard_state) {
|
|
|
|
/* we count the connection as successful once we can read from it. We do
|
|
|
|
* not, however, delay use of the circuit here, since it's just for a
|
|
|
|
* one-hop directory request. */
|
|
|
|
/* XXXXprop271 note that this will not do the right thing for other
|
|
|
|
* waiting circuits that would be triggered by this circuit becoming
|
|
|
|
* complete/usable. But that's ok, I think.
|
|
|
|
*/
|
|
|
|
entry_guard_succeeded(&conn->guard_state);
|
|
|
|
circuit_guard_state_free(conn->guard_state);
|
|
|
|
conn->guard_state = NULL;
|
|
|
|
}
|
|
|
|
|
2006-07-17 08:35:06 +02:00
|
|
|
/* now check if it's got any hints for us about our IP address. */
|
2006-09-30 22:14:15 +02:00
|
|
|
if (conn->dirconn_direct) {
|
|
|
|
char *guess = http_get_header(headers, X_ADDRESS_HEADER);
|
|
|
|
if (guess) {
|
2008-02-09 11:36:49 +01:00
|
|
|
router_new_address_suggestion(guess, conn);
|
2006-09-30 22:14:15 +02:00
|
|
|
tor_free(guess);
|
|
|
|
}
|
2006-07-17 08:35:06 +02:00
|
|
|
}
|
|
|
|
|
2004-08-15 22:30:15 +02:00
|
|
|
if (date_header > 0) {
|
2007-01-10 17:33:40 +01:00
|
|
|
/* The date header was written very soon after we sent our request,
|
|
|
|
* so compute the skew as the difference between sending the request
|
|
|
|
* and the date header. (We used to check now-date_header, but that's
|
|
|
|
* inaccurate if we spend a lot of time downloading.)
|
|
|
|
*/
|
2018-02-01 03:12:38 +01:00
|
|
|
apparent_skew = conn->base_.timestamp_last_write_allowed - date_header;
|
2015-12-08 20:32:29 +01:00
|
|
|
if (labs(apparent_skew)>ALLOW_DIRECTORY_TIME_SKEW) {
|
2007-01-06 06:42:31 +01:00
|
|
|
int trusted = router_digest_is_trusted_dir(conn->identity_digest);
|
2015-12-08 20:32:29 +01:00
|
|
|
clock_skew_warning(TO_CONN(conn), apparent_skew, trusted, LD_HTTP,
|
|
|
|
"directory", "DIRSERV");
|
2005-01-29 12:48:37 +01:00
|
|
|
skewed = 1; /* don't check the recommended-versions line */
|
2004-08-15 22:30:15 +02:00
|
|
|
} else {
|
2006-02-13 10:37:53 +01:00
|
|
|
log_debug(LD_HTTP, "Time on received directory is within tolerance; "
|
2015-12-08 20:32:29 +01:00
|
|
|
"we are %ld seconds skewed. (That's okay.)", apparent_skew);
|
2004-08-15 22:30:15 +02:00
|
|
|
}
|
|
|
|
}
|
2006-11-14 04:45:48 +01:00
|
|
|
(void) skewed; /* skewed isn't used yet. */
|
2004-07-20 04:44:26 +02:00
|
|
|
|
2009-10-27 14:14:52 +01:00
|
|
|
if (status_code == 503) {
|
2011-03-25 22:11:04 +01:00
|
|
|
routerstatus_t *rs;
|
2012-09-10 21:55:27 +02:00
|
|
|
dir_server_t *ds;
|
2011-05-06 03:56:52 +02:00
|
|
|
const char *id_digest = conn->identity_digest;
|
2011-03-25 22:11:04 +01:00
|
|
|
log_info(LD_DIR,"Received http status code %d (%s) from server "
|
|
|
|
"'%s:%d'. I'll try again soon.",
|
2012-10-12 18:22:13 +02:00
|
|
|
status_code, escaped(reason), conn->base_.address,
|
|
|
|
conn->base_.port);
|
2017-05-02 19:06:25 +02:00
|
|
|
time_t now = approx_time();
|
2011-05-06 03:56:52 +02:00
|
|
|
if ((rs = router_get_mutable_consensus_status_by_id(id_digest)))
|
2011-03-25 22:11:04 +01:00
|
|
|
rs->last_dir_503_at = now;
|
2012-09-10 21:23:39 +02:00
|
|
|
if ((ds = router_get_fallback_dirserver_by_digest(id_digest)))
|
2011-03-25 22:11:04 +01:00
|
|
|
ds->fake_status.last_dir_503_at = now;
|
2009-10-27 14:14:52 +01:00
|
|
|
|
2017-05-22 16:42:18 +02:00
|
|
|
rv = -1;
|
|
|
|
goto done;
|
2006-02-03 13:08:31 +01:00
|
|
|
}
|
|
|
|
|
2017-06-20 17:43:37 +02:00
|
|
|
if (dir_client_decompress_response_body(&body, &body_len,
|
|
|
|
conn, compression, anonymized_connection) < 0) {
|
|
|
|
rv = -1;
|
|
|
|
goto done;
|
2004-09-08 08:52:33 +02:00
|
|
|
}
|
|
|
|
|
2017-05-02 19:06:25 +02:00
|
|
|
response_handler_args_t args;
|
|
|
|
memset(&args, 0, sizeof(args));
|
|
|
|
args.status_code = status_code;
|
|
|
|
args.reason = reason;
|
|
|
|
args.body = body;
|
|
|
|
args.body_len = body_len;
|
|
|
|
args.headers = headers;
|
|
|
|
|
|
|
|
switch (conn->base_.purpose) {
|
|
|
|
case DIR_PURPOSE_FETCH_CONSENSUS:
|
|
|
|
rv = handle_response_fetch_consensus(conn, &args);
|
|
|
|
break;
|
|
|
|
case DIR_PURPOSE_FETCH_CERTIFICATE:
|
|
|
|
rv = handle_response_fetch_certificate(conn, &args);
|
|
|
|
break;
|
|
|
|
case DIR_PURPOSE_FETCH_STATUS_VOTE:
|
|
|
|
rv = handle_response_fetch_status_vote(conn, &args);
|
|
|
|
break;
|
|
|
|
case DIR_PURPOSE_FETCH_DETACHED_SIGNATURES:
|
|
|
|
rv = handle_response_fetch_detached_signatures(conn, &args);
|
|
|
|
break;
|
|
|
|
case DIR_PURPOSE_FETCH_SERVERDESC:
|
|
|
|
case DIR_PURPOSE_FETCH_EXTRAINFO:
|
|
|
|
rv = handle_response_fetch_desc(conn, &args);
|
|
|
|
break;
|
|
|
|
case DIR_PURPOSE_FETCH_MICRODESC:
|
|
|
|
rv = handle_response_fetch_microdesc(conn, &args);
|
|
|
|
break;
|
|
|
|
case DIR_PURPOSE_FETCH_RENDDESC_V2:
|
|
|
|
rv = handle_response_fetch_renddesc_v2(conn, &args);
|
|
|
|
break;
|
|
|
|
case DIR_PURPOSE_UPLOAD_DIR:
|
|
|
|
rv = handle_response_upload_dir(conn, &args);
|
|
|
|
break;
|
|
|
|
case DIR_PURPOSE_UPLOAD_SIGNATURES:
|
|
|
|
rv = handle_response_upload_signatures(conn, &args);
|
|
|
|
break;
|
|
|
|
case DIR_PURPOSE_UPLOAD_VOTE:
|
|
|
|
rv = handle_response_upload_vote(conn, &args);
|
|
|
|
break;
|
|
|
|
case DIR_PURPOSE_UPLOAD_RENDDESC_V2:
|
|
|
|
rv = handle_response_upload_renddesc_v2(conn, &args);
|
|
|
|
break;
|
2017-04-19 20:36:53 +02:00
|
|
|
case DIR_PURPOSE_UPLOAD_HSDESC:
|
|
|
|
rv = handle_response_upload_hsdesc(conn, &args);
|
|
|
|
break;
|
2017-06-01 13:25:46 +02:00
|
|
|
case DIR_PURPOSE_FETCH_HSDESC:
|
|
|
|
rv = handle_response_fetch_hsdesc_v3(conn, &args);
|
|
|
|
break;
|
2017-05-02 19:06:25 +02:00
|
|
|
default:
|
|
|
|
tor_assert_nonfatal_unreached();
|
|
|
|
rv = -1;
|
|
|
|
break;
|
|
|
|
}
|
2017-05-22 16:42:18 +02:00
|
|
|
|
|
|
|
done:
|
2017-05-02 19:06:25 +02:00
|
|
|
tor_free(body);
|
|
|
|
tor_free(headers);
|
|
|
|
tor_free(reason);
|
|
|
|
return rv;
|
|
|
|
}
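/* Hedged sketch of the handler shape dispatched from the switch above; it
 * is not an actual handler in this file, and DIR_PURPOSE_EXAMPLE is a
 * hypothetical purpose constant. */
#if 0
static int
handle_response_example(dir_connection_t *conn,
                        const response_handler_args_t *args)
{
  tor_assert(conn->base_.purpose == DIR_PURPOSE_EXAMPLE); /* hypothetical */
  if (args->status_code != 200) {
    log_info(LD_DIR, "Got status %d (%s) from '%s:%d'",
             args->status_code, escaped(args->reason),
             conn->base_.address, conn->base_.port);
    return -1;
  }
  /* Parse args->body (args->body_len bytes) and args->headers here. */
  return 0;
}
#endif /* illustrative sketch */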
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Handler function: processes a response to a request for a networkstatus
|
|
|
|
* consensus document by checking the consensus, storing it, and marking
|
|
|
|
* router requests as reachable.
|
|
|
|
**/
|
2017-10-25 18:25:53 +02:00
|
|
|
STATIC int
|
2017-05-02 19:06:25 +02:00
|
|
|
handle_response_fetch_consensus(dir_connection_t *conn,
|
|
|
|
const response_handler_args_t *args)
|
|
|
|
{
|
|
|
|
tor_assert(conn->base_.purpose == DIR_PURPOSE_FETCH_CONSENSUS);
|
|
|
|
const int status_code = args->status_code;
|
|
|
|
const char *body = args->body;
|
|
|
|
const size_t body_len = args->body_len;
|
|
|
|
const char *reason = args->reason;
|
|
|
|
const time_t now = approx_time();
|
|
|
|
|
2017-05-03 21:08:59 +02:00
|
|
|
const char *consensus;
|
|
|
|
char *new_consensus = NULL;
|
|
|
|
const char *sourcename;
|
|
|
|
|
2017-05-02 19:11:44 +02:00
|
|
|
int r;
|
|
|
|
const char *flavname = conn->requested_resource;
|
|
|
|
if (status_code != 200) {
|
|
|
|
int severity = (status_code == 304) ? LOG_INFO : LOG_WARN;
|
|
|
|
tor_log(severity, LD_DIR,
|
|
|
|
"Received http status code %d (%s) from server "
|
|
|
|
"'%s:%d' while fetching consensus directory.",
|
|
|
|
status_code, escaped(reason), conn->base_.address,
|
|
|
|
conn->base_.port);
|
|
|
|
networkstatus_consensus_download_failed(status_code, flavname);
|
|
|
|
return -1;
|
|
|
|
}
|
2017-05-03 21:08:59 +02:00
|
|
|
|
|
|
|
if (looks_like_a_consensus_diff(body, body_len)) {
|
|
|
|
/* First find our previous consensus. Maybe it's in ram, maybe not. */
|
|
|
|
cached_dir_t *cd = dirserv_get_consensus(flavname);
|
|
|
|
const char *consensus_body;
|
|
|
|
char *owned_consensus = NULL;
|
|
|
|
if (cd) {
|
|
|
|
consensus_body = cd->dir;
|
|
|
|
} else {
|
|
|
|
owned_consensus = networkstatus_read_cached_consensus(flavname);
|
|
|
|
consensus_body = owned_consensus;
|
|
|
|
}
|
|
|
|
if (!consensus_body) {
|
|
|
|
log_warn(LD_DIR, "Received a consensus diff, but we can't find "
|
|
|
|
"any %s-flavored consensus in our current cache.",flavname);
|
|
|
|
networkstatus_consensus_download_failed(0, flavname);
|
|
|
|
// XXXX if this happens too much, see below
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
new_consensus = consensus_diff_apply(consensus_body, body);
|
|
|
|
tor_free(owned_consensus);
|
|
|
|
if (new_consensus == NULL) {
|
|
|
|
log_warn(LD_DIR, "Could not apply consensus diff received from server "
|
|
|
|
"'%s:%d'", conn->base_.address, conn->base_.port);
|
|
|
|
// XXXX If this happens too many times, we should maybe not use
|
|
|
|
// XXXX this directory for diffs any more?
|
|
|
|
networkstatus_consensus_download_failed(0, flavname);
|
|
|
|
return -1;
|
|
|
|
}
|
2017-05-04 14:58:06 +02:00
|
|
|
log_info(LD_DIR, "Applied consensus diff (size %d) from server "
|
|
|
|
"'%s:%d', resulting in a new consensus document (size %d).",
|
2017-05-03 21:08:59 +02:00
|
|
|
(int)body_len, conn->base_.address, conn->base_.port,
|
|
|
|
(int)strlen(new_consensus));
|
|
|
|
consensus = new_consensus;
|
|
|
|
sourcename = "generated based on a diff";
|
|
|
|
} else {
|
|
|
|
log_info(LD_DIR,"Received consensus directory (body size %d) from server "
|
|
|
|
"'%s:%d'", (int)body_len, conn->base_.address, conn->base_.port);
|
|
|
|
consensus = body;
|
|
|
|
sourcename = "downloaded";
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((r=networkstatus_set_current_consensus(consensus, flavname, 0,
|
2017-05-02 19:11:44 +02:00
|
|
|
conn->identity_digest))<0) {
|
|
|
|
log_fn(r<-1?LOG_WARN:LOG_INFO, LD_DIR,
|
2017-05-03 21:08:59 +02:00
|
|
|
"Unable to load %s consensus directory %s from "
|
2017-05-02 19:11:44 +02:00
|
|
|
"server '%s:%d'. I'll try again soon.",
|
2017-05-03 21:08:59 +02:00
|
|
|
flavname, sourcename, conn->base_.address, conn->base_.port);
|
2017-05-02 19:11:44 +02:00
|
|
|
networkstatus_consensus_download_failed(0, flavname);
|
2017-05-03 21:08:59 +02:00
|
|
|
tor_free(new_consensus);
|
2017-05-02 19:11:44 +02:00
|
|
|
return -1;
|
|
|
|
}
|
2016-04-13 08:54:31 +02:00
|
|
|
|
2017-05-02 19:11:44 +02:00
|
|
|
/* If we launched other fetches for this consensus, cancel them. */
|
|
|
|
connection_dir_close_consensus_fetches(conn, flavname);
|
|
|
|
|
2017-10-25 18:18:38 +02:00
|
|
|
/* update the list of routers and directory guards */
|
2017-05-02 19:11:44 +02:00
|
|
|
routers_update_all_from_networkstatus(now, 3);
|
|
|
|
update_microdescs_from_networkstatus(now);
|
|
|
|
directory_info_has_arrived(now, 0, 0);
|
2017-10-25 18:18:38 +02:00
|
|
|
|
2017-05-02 19:11:44 +02:00
|
|
|
if (authdir_mode_v3(get_options())) {
|
|
|
|
sr_act_post_consensus(
|
|
|
|
networkstatus_get_latest_consensus_by_flavor(FLAV_NS));
|
|
|
|
}
|
|
|
|
log_info(LD_DIR, "Successfully loaded consensus.");
|
2007-10-28 21:30:21 +01:00
|
|
|
|
2017-05-03 21:08:59 +02:00
|
|
|
tor_free(new_consensus);
|
2017-05-02 19:06:25 +02:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Handler function: processes a response to a request for one or more
|
|
|
|
* authority certificates.
|
|
|
|
**/
|
|
|
|
static int
|
|
|
|
handle_response_fetch_certificate(dir_connection_t *conn,
|
|
|
|
const response_handler_args_t *args)
|
|
|
|
{
|
|
|
|
tor_assert(conn->base_.purpose == DIR_PURPOSE_FETCH_CERTIFICATE);
|
|
|
|
const int status_code = args->status_code;
|
|
|
|
const char *reason = args->reason;
|
|
|
|
const char *body = args->body;
|
|
|
|
const size_t body_len = args->body_len;
|
|
|
|
|
|
|
|
if (status_code != 200) {
|
2017-05-02 19:11:44 +02:00
|
|
|
log_warn(LD_DIR,
|
|
|
|
"Received http status code %d (%s) from server "
|
|
|
|
"'%s:%d' while fetching \"/tor/keys/%s\".",
|
|
|
|
status_code, escaped(reason), conn->base_.address,
|
|
|
|
conn->base_.port, conn->requested_resource);
|
|
|
|
connection_dir_download_cert_failed(conn, status_code);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
log_info(LD_DIR,"Received authority certificates (body size %d) from "
|
|
|
|
"server '%s:%d'",
|
|
|
|
(int)body_len, conn->base_.address, conn->base_.port);
|
2013-05-09 13:56:54 +02:00
|
|
|
|
2017-05-02 19:11:44 +02:00
|
|
|
/*
|
|
|
|
* Tell trusted_dirs_load_certs_from_string() whether it was by fp
|
|
|
|
* or fp-sk pair.
|
|
|
|
*/
|
|
|
|
int src_code = -1;
|
|
|
|
if (!strcmpstart(conn->requested_resource, "fp/")) {
|
|
|
|
src_code = TRUSTED_DIRS_CERTS_SRC_DL_BY_ID_DIGEST;
|
|
|
|
} else if (!strcmpstart(conn->requested_resource, "fp-sk/")) {
|
|
|
|
src_code = TRUSTED_DIRS_CERTS_SRC_DL_BY_ID_SK_DIGEST;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (src_code != -1) {
|
|
|
|
if (trusted_dirs_load_certs_from_string(body, src_code, 1,
|
|
|
|
conn->identity_digest)<0) {
|
|
|
|
log_warn(LD_DIR, "Unable to parse fetched certificates");
|
|
|
|
/* if we fetched more than one and only some failed, the successful
|
|
|
|
* ones got flushed to disk so it's safe to call this on them */
|
2013-05-09 13:56:54 +02:00
|
|
|
connection_dir_download_cert_failed(conn, status_code);
|
2017-05-02 19:11:44 +02:00
|
|
|
} else {
|
|
|
|
time_t now = approx_time();
|
|
|
|
directory_info_has_arrived(now, 0, 0);
|
|
|
|
log_info(LD_DIR, "Successfully loaded certificates from fetch.");
|
2007-09-08 21:08:39 +02:00
|
|
|
}
|
2017-05-02 19:11:44 +02:00
|
|
|
} else {
|
|
|
|
log_warn(LD_DIR,
|
|
|
|
"Couldn't figure out what to do with fetched certificates for "
|
|
|
|
"unknown resource %s",
|
|
|
|
conn->requested_resource);
|
|
|
|
connection_dir_download_cert_failed(conn, status_code);
|
|
|
|
}
|
2017-05-02 19:06:25 +02:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Handler function: processes a response to a request for an authority's
|
|
|
|
* current networkstatus vote.
|
|
|
|
**/
|
|
|
|
static int
|
|
|
|
handle_response_fetch_status_vote(dir_connection_t *conn,
|
2017-05-02 19:11:44 +02:00
|
|
|
const response_handler_args_t *args)
|
2017-05-02 19:06:25 +02:00
|
|
|
{
|
|
|
|
tor_assert(conn->base_.purpose == DIR_PURPOSE_FETCH_STATUS_VOTE);
|
|
|
|
const int status_code = args->status_code;
|
|
|
|
const char *reason = args->reason;
|
|
|
|
const char *body = args->body;
|
|
|
|
const size_t body_len = args->body_len;
|
|
|
|
|
2017-05-02 19:11:44 +02:00
|
|
|
const char *msg;
|
|
|
|
int st;
|
|
|
|
log_info(LD_DIR,"Got votes (body size %d) from server %s:%d",
|
|
|
|
(int)body_len, conn->base_.address, conn->base_.port);
|
|
|
|
if (status_code != 200) {
|
|
|
|
log_warn(LD_DIR,
|
2007-09-22 08:06:05 +02:00
|
|
|
"Received http status code %d (%s) from server "
|
|
|
|
"'%s:%d' while fetching \"/tor/status-vote/next/%s.z\".",
|
2012-10-12 18:22:13 +02:00
|
|
|
status_code, escaped(reason), conn->base_.address,
|
|
|
|
conn->base_.port, conn->requested_resource);
|
2017-05-02 19:11:44 +02:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
dirvote_add_vote(body, &msg, &st);
|
|
|
|
if (st > 299) {
|
|
|
|
log_warn(LD_DIR, "Error adding retrieved vote: %s", msg);
|
|
|
|
} else {
|
|
|
|
log_info(LD_DIR, "Added vote(s) successfully [msg: %s]", msg);
|
|
|
|
}
|
2017-05-02 19:06:25 +02:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Handler function: processes a response to a request for the signatures
|
|
|
|
* that an authority knows about on a given consensus.
|
|
|
|
**/
|
|
|
|
static int
|
|
|
|
handle_response_fetch_detached_signatures(dir_connection_t *conn,
|
2017-05-02 19:11:44 +02:00
|
|
|
const response_handler_args_t *args)
|
2017-05-02 19:06:25 +02:00
|
|
|
{
|
|
|
|
tor_assert(conn->base_.purpose == DIR_PURPOSE_FETCH_DETACHED_SIGNATURES);
|
|
|
|
const int status_code = args->status_code;
|
|
|
|
const char *reason = args->reason;
|
|
|
|
const char *body = args->body;
|
|
|
|
const size_t body_len = args->body_len;
|
|
|
|
|
2017-05-02 19:11:44 +02:00
|
|
|
const char *msg = NULL;
|
|
|
|
log_info(LD_DIR,"Got detached signatures (body size %d) from server %s:%d",
|
|
|
|
(int)body_len, conn->base_.address, conn->base_.port);
|
|
|
|
if (status_code != 200) {
|
|
|
|
log_warn(LD_DIR,
|
2009-11-04 17:38:57 +01:00
|
|
|
"Received http status code %d (%s) from server '%s:%d' while fetching "
|
|
|
|
"\"/tor/status-vote/next/consensus-signatures.z\".",
|
2017-05-02 19:11:44 +02:00
|
|
|
status_code, escaped(reason), conn->base_.address,
|
|
|
|
conn->base_.port);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
if (dirvote_add_signatures(body, conn->base_.address, &msg)<0) {
|
|
|
|
log_warn(LD_DIR, "Problem adding detached signatures from %s:%d: %s",
|
|
|
|
conn->base_.address, conn->base_.port, msg?msg:"???");
|
|
|
|
}
|
2007-08-24 16:41:06 +02:00
|
|
|
|
2017-05-02 19:06:25 +02:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Handler function: processes a response to a request for a group of server
|
|
|
|
* descriptors or extrainfo documents.
|
|
|
|
**/
|
|
|
|
static int
|
|
|
|
handle_response_fetch_desc(dir_connection_t *conn,
|
|
|
|
const response_handler_args_t *args)
|
|
|
|
{
|
|
|
|
tor_assert(conn->base_.purpose == DIR_PURPOSE_FETCH_SERVERDESC ||
|
|
|
|
conn->base_.purpose == DIR_PURPOSE_FETCH_EXTRAINFO);
|
|
|
|
const int status_code = args->status_code;
|
|
|
|
const char *reason = args->reason;
|
|
|
|
const char *body = args->body;
|
|
|
|
const size_t body_len = args->body_len;
|
|
|
|
|
2017-05-02 19:11:44 +02:00
|
|
|
int was_ei = conn->base_.purpose == DIR_PURPOSE_FETCH_EXTRAINFO;
|
|
|
|
smartlist_t *which = NULL;
|
|
|
|
int n_asked_for = 0;
|
|
|
|
int descriptor_digests = conn->requested_resource &&
|
|
|
|
!strcmpstart(conn->requested_resource,"d/");
|
|
|
|
log_info(LD_DIR,"Received %s (body size %d) from server '%s:%d'",
|
|
|
|
was_ei ? "extra server info" : "server info",
|
|
|
|
(int)body_len, conn->base_.address, conn->base_.port);
|
|
|
|
if (conn->requested_resource &&
|
|
|
|
(!strcmpstart(conn->requested_resource,"d/") ||
|
|
|
|
!strcmpstart(conn->requested_resource,"fp/"))) {
|
|
|
|
which = smartlist_new();
|
|
|
|
dir_split_resource_into_fingerprints(conn->requested_resource +
|
|
|
|
(descriptor_digests ? 2 : 3),
|
|
|
|
which, NULL, 0);
|
|
|
|
n_asked_for = smartlist_len(which);
|
|
|
|
}
|
|
|
|
if (status_code != 200) {
|
|
|
|
int dir_okay = status_code == 404 ||
|
|
|
|
(status_code == 400 && !strcmp(reason, "Servers unavailable."));
|
|
|
|
/* 404 means that it didn't have them; no big deal.
|
|
|
|
* Older (pre-0.1.1.8) servers said 400 Servers unavailable instead. */
|
|
|
|
log_fn(dir_okay ? LOG_INFO : LOG_WARN, LD_DIR,
|
|
|
|
"Received http status code %d (%s) from server '%s:%d' "
|
|
|
|
"while fetching \"/tor/server/%s\". I'll try again soon.",
|
|
|
|
status_code, escaped(reason), conn->base_.address,
|
|
|
|
conn->base_.port, conn->requested_resource);
|
|
|
|
if (!which) {
|
|
|
|
connection_dir_download_routerdesc_failed(conn);
|
|
|
|
} else {
|
|
|
|
dir_routerdesc_download_failed(which, status_code,
|
|
|
|
conn->router_purpose,
|
|
|
|
was_ei, descriptor_digests);
|
|
|
|
SMARTLIST_FOREACH(which, char *, cp, tor_free(cp));
|
|
|
|
smartlist_free(which);
|
2005-09-18 06:15:39 +02:00
|
|
|
}
|
2017-05-02 19:11:44 +02:00
|
|
|
return dir_okay ? 0 : -1;
|
|
|
|
}
|
|
|
|
/* Learn the routers, assuming we requested by fingerprint or "all"
|
|
|
|
* or "authority".
|
|
|
|
*
|
|
|
|
* We use "authority" to fetch our own descriptor for
|
|
|
|
* testing, and to fetch bridge descriptors for bootstrapping. Ignore
|
|
|
|
* the output of "authority" requests unless we are using bridges,
|
|
|
|
* since otherwise they'll be the response from reachability tests,
|
|
|
|
* and we don't really want to add that to our routerlist. */
|
|
|
|
if (which || (conn->requested_resource &&
|
|
|
|
(!strcmpstart(conn->requested_resource, "all") ||
|
|
|
|
(!strcmpstart(conn->requested_resource, "authority") &&
|
|
|
|
get_options()->UseBridges)))) {
|
|
|
|
/* as we learn from them, we remove them from 'which' */
|
|
|
|
if (was_ei) {
|
|
|
|
router_load_extrainfo_from_string(body, NULL, SAVED_NOWHERE, which,
|
|
|
|
descriptor_digests);
|
|
|
|
} else {
|
|
|
|
//router_load_routers_from_string(body, NULL, SAVED_NOWHERE, which,
|
|
|
|
// descriptor_digests, conn->router_purpose);
|
|
|
|
if (load_downloaded_routers(body, which, descriptor_digests,
|
|
|
|
conn->router_purpose,
|
|
|
|
conn->base_.address)) {
|
|
|
|
time_t now = approx_time();
|
2017-11-16 16:51:41 +01:00
|
|
|
directory_info_has_arrived(now, 0, 1);
|
2007-05-18 23:19:19 +02:00
|
|
|
}
|
2005-09-30 22:04:55 +02:00
|
|
|
}
|
2017-05-02 19:11:44 +02:00
|
|
|
}
|
|
|
|
if (which) { /* mark remaining ones as failed */
|
|
|
|
log_info(LD_DIR, "Received %d/%d %s requested from %s:%d",
|
|
|
|
n_asked_for-smartlist_len(which), n_asked_for,
|
|
|
|
was_ei ? "extra-info documents" : "router descriptors",
|
|
|
|
conn->base_.address, (int)conn->base_.port);
|
|
|
|
if (smartlist_len(which)) {
|
|
|
|
dir_routerdesc_download_failed(which, status_code,
|
|
|
|
conn->router_purpose,
|
|
|
|
was_ei, descriptor_digests);
|
2005-09-15 07:19:38 +02:00
|
|
|
}
|
2017-05-02 19:11:44 +02:00
|
|
|
SMARTLIST_FOREACH(which, char *, cp, tor_free(cp));
|
|
|
|
smartlist_free(which);
|
|
|
|
}
|
|
|
|
if (directory_conn_is_self_reachability_test(conn))
|
|
|
|
router_dirport_found_reachable();
|
2017-05-02 19:06:25 +02:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Handler function: processes a response to a request for a group of
|
|
|
|
* microdescriptors.
|
|
|
|
**/
|
2017-11-06 13:48:22 +01:00
|
|
|
STATIC int
|
2017-05-02 19:06:25 +02:00
|
|
|
handle_response_fetch_microdesc(dir_connection_t *conn,
|
|
|
|
const response_handler_args_t *args)
|
|
|
|
{
|
|
|
|
tor_assert(conn->base_.purpose == DIR_PURPOSE_FETCH_MICRODESC);
|
|
|
|
const int status_code = args->status_code;
|
|
|
|
const char *reason = args->reason;
|
|
|
|
const char *body = args->body;
|
|
|
|
const size_t body_len = args->body_len;
|
|
|
|
|
2017-05-02 19:11:44 +02:00
|
|
|
smartlist_t *which = NULL;
|
|
|
|
log_info(LD_DIR,"Received answer to microdescriptor request (status %d, "
|
|
|
|
"body size %d) from server '%s:%d'",
|
|
|
|
status_code, (int)body_len, conn->base_.address,
|
|
|
|
conn->base_.port);
|
|
|
|
tor_assert(conn->requested_resource &&
|
|
|
|
!strcmpstart(conn->requested_resource, "d/"));
|
2017-11-06 13:48:22 +01:00
|
|
|
tor_assert_nonfatal(!tor_mem_is_zero(conn->identity_digest, DIGEST_LEN));
|
2017-05-02 19:11:44 +02:00
|
|
|
which = smartlist_new();
|
|
|
|
dir_split_resource_into_fingerprints(conn->requested_resource+2,
|
|
|
|
which, NULL,
|
|
|
|
DSR_DIGEST256|DSR_BASE64);
|
|
|
|
if (status_code != 200) {
|
|
|
|
log_info(LD_DIR, "Received status code %d (%s) from server "
|
|
|
|
"'%s:%d' while fetching \"/tor/micro/%s\". I'll try again "
|
|
|
|
"soon.",
|
|
|
|
status_code, escaped(reason), conn->base_.address,
|
|
|
|
(int)conn->base_.port, conn->requested_resource);
|
2017-11-06 13:48:22 +01:00
|
|
|
dir_microdesc_download_failed(which, status_code, conn->identity_digest);
|
2017-05-02 19:11:44 +02:00
|
|
|
SMARTLIST_FOREACH(which, char *, cp, tor_free(cp));
|
|
|
|
smartlist_free(which);
|
|
|
|
return 0;
|
|
|
|
} else {
|
|
|
|
smartlist_t *mds;
|
|
|
|
time_t now = approx_time();
|
|
|
|
mds = microdescs_add_to_cache(get_microdesc_cache(),
|
|
|
|
body, body+body_len, SAVED_NOWHERE, 0,
|
|
|
|
now, which);
|
|
|
|
if (smartlist_len(which)) {
|
|
|
|
/* Mark remaining ones as failed. */
|
2017-11-06 13:48:22 +01:00
|
|
|
dir_microdesc_download_failed(which, status_code, conn->identity_digest);
|
2010-05-11 23:20:33 +02:00
|
|
|
}
|
2017-05-02 19:11:44 +02:00
|
|
|
if (mds && smartlist_len(mds)) {
|
|
|
|
control_event_bootstrap(BOOTSTRAP_STATUS_LOADING_DESCRIPTORS,
|
|
|
|
count_loading_descriptors_progress());
|
|
|
|
directory_info_has_arrived(now, 0, 1);
|
|
|
|
}
|
|
|
|
SMARTLIST_FOREACH(which, char *, cp, tor_free(cp));
|
|
|
|
smartlist_free(which);
|
|
|
|
smartlist_free(mds);
|
|
|
|
}
|
2005-09-08 08:22:44 +02:00
|
|
|
|
2017-05-02 19:06:25 +02:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Handler function: processes a response to a POST request to upload our
|
|
|
|
* router descriptor.
|
|
|
|
**/
|
|
|
|
static int
|
|
|
|
handle_response_upload_dir(dir_connection_t *conn,
|
|
|
|
const response_handler_args_t *args)
|
|
|
|
{
|
|
|
|
tor_assert(conn->base_.purpose == DIR_PURPOSE_UPLOAD_DIR);
|
|
|
|
const int status_code = args->status_code;
|
|
|
|
const char *reason = args->reason;
|
|
|
|
const char *headers = args->headers;
|
|
|
|
|
2017-05-02 19:11:44 +02:00
|
|
|
switch (status_code) {
|
|
|
|
case 200: {
|
|
|
|
dir_server_t *ds =
|
|
|
|
router_get_trusteddirserver_by_digest(conn->identity_digest);
|
|
|
|
char *rejected_hdr = http_get_header(headers,
|
|
|
|
"X-Descriptor-Not-New: ");
|
|
|
|
if (rejected_hdr) {
|
|
|
|
if (!strcmp(rejected_hdr, "Yes")) {
|
|
|
|
log_info(LD_GENERAL,
|
|
|
|
"Authority '%s' declined our descriptor (not new)",
|
|
|
|
ds->nickname);
|
|
|
|
/* XXXX use this information; be sure to upload next one
|
|
|
|
* sooner. -NM */
|
|
|
|
/* XXXX++ On further thought, the task above implies that we're
|
|
|
|
* basing our regenerate-descriptor time on when we uploaded the
|
|
|
|
* last descriptor, not on the published time of the last
|
|
|
|
* descriptor. If those are different, that's a bad thing to
|
|
|
|
* do. -NM */
|
|
|
|
}
|
|
|
|
tor_free(rejected_hdr);
|
|
|
|
}
|
|
|
|
log_info(LD_GENERAL,"eof (status 200) after uploading server "
|
|
|
|
"descriptor: finished.");
|
|
|
|
control_event_server_status(
|
|
|
|
LOG_NOTICE, "ACCEPTED_SERVER_DESCRIPTOR DIRAUTH=%s:%d",
|
|
|
|
conn->base_.address, conn->base_.port);
|
|
|
|
|
|
|
|
ds->has_accepted_serverdesc = 1;
|
|
|
|
if (directories_have_accepted_server_descriptor())
|
|
|
|
control_event_server_status(LOG_NOTICE, "GOOD_SERVER_DESCRIPTOR");
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 400:
|
|
|
|
log_warn(LD_GENERAL,"http status 400 (%s) response from "
|
|
|
|
"dirserver '%s:%d'. Please correct.",
|
|
|
|
escaped(reason), conn->base_.address, conn->base_.port);
|
|
|
|
control_event_server_status(LOG_WARN,
|
|
|
|
"BAD_SERVER_DESCRIPTOR DIRAUTH=%s:%d REASON=\"%s\"",
|
|
|
|
conn->base_.address, conn->base_.port, escaped(reason));
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
log_warn(LD_GENERAL,
|
2017-05-24 15:08:59 +02:00
|
|
|
"HTTP status %d (%s) was unexpected while uploading "
|
|
|
|
"descriptor to server '%s:%d'. Possibly the server is "
|
|
|
|
"misconfigured?",
|
2012-10-12 18:22:13 +02:00
|
|
|
status_code, escaped(reason), conn->base_.address,
|
|
|
|
conn->base_.port);
|
2017-05-02 19:11:44 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
/* return 0 in all cases, since we don't want to mark any
|
|
|
|
* dirservers down just because they don't like us. */
|
2004-06-16 23:08:29 +02:00
|
|
|
|
2017-05-02 19:06:25 +02:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Handler function: processes a response to a POST request to upload our
|
|
|
|
* own networkstatus vote.
|
|
|
|
**/
|
|
|
|
static int
|
|
|
|
handle_response_upload_vote(dir_connection_t *conn,
|
|
|
|
const response_handler_args_t *args)
|
|
|
|
{
|
|
|
|
tor_assert(conn->base_.purpose == DIR_PURPOSE_UPLOAD_VOTE);
|
|
|
|
const int status_code = args->status_code;
|
|
|
|
const char *reason = args->reason;
|
|
|
|
|
2017-05-02 19:11:44 +02:00
|
|
|
switch (status_code) {
|
|
|
|
case 200: {
|
|
|
|
log_notice(LD_DIR,"Uploaded a vote to dirserver %s:%d",
|
|
|
|
conn->base_.address, conn->base_.port);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 400:
|
|
|
|
log_warn(LD_DIR,"http status 400 (%s) response after uploading "
|
|
|
|
"vote to dirserver '%s:%d'. Please correct.",
|
|
|
|
escaped(reason), conn->base_.address, conn->base_.port);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
log_warn(LD_GENERAL,
|
2017-05-24 15:08:59 +02:00
|
|
|
"HTTP status %d (%s) was unexpected while uploading "
|
|
|
|
"vote to server '%s:%d'.",
|
2012-10-12 18:22:13 +02:00
|
|
|
status_code, escaped(reason), conn->base_.address,
|
|
|
|
conn->base_.port);
|
2017-05-02 19:11:44 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
/* return 0 in all cases, since we don't want to mark any
|
|
|
|
* dirservers down just because they don't like us. */
|
2017-05-02 19:06:25 +02:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Handler function: processes a response to a POST request to upload our
|
|
|
|
* view of the signatures on the current consensus.
|
|
|
|
**/
|
|
|
|
static int
|
|
|
|
handle_response_upload_signatures(dir_connection_t *conn,
|
|
|
|
const response_handler_args_t *args)
|
|
|
|
{
|
|
|
|
tor_assert(conn->base_.purpose == DIR_PURPOSE_UPLOAD_SIGNATURES);
|
|
|
|
const int status_code = args->status_code;
|
|
|
|
const char *reason = args->reason;
|
2007-07-26 00:56:44 +02:00
|
|
|
|
2017-05-02 19:11:44 +02:00
|
|
|
switch (status_code) {
|
|
|
|
case 200: {
|
|
|
|
log_notice(LD_DIR,"Uploaded signature(s) to dirserver %s:%d",
|
|
|
|
conn->base_.address, conn->base_.port);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 400:
|
|
|
|
log_warn(LD_DIR,"http status 400 (%s) response after uploading "
|
|
|
|
"signatures to dirserver '%s:%d'. Please correct.",
|
|
|
|
escaped(reason), conn->base_.address, conn->base_.port);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
log_warn(LD_GENERAL,
|
2017-05-24 15:08:59 +02:00
|
|
|
"HTTP status %d (%s) was unexpected while uploading "
|
|
|
|
"signatures to server '%s:%d'.",
|
2012-10-12 18:22:13 +02:00
|
|
|
status_code, escaped(reason), conn->base_.address,
|
|
|
|
conn->base_.port);
|
2017-05-02 19:11:44 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
/* return 0 in all cases, since we don't want to mark any
|
|
|
|
* dirservers down just because they don't like us. */
|
2007-07-29 04:55:21 +02:00
|
|
|
|
2017-05-02 19:06:25 +02:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-06-01 13:25:46 +02:00
|
|
|
/**
|
|
|
|
* Handler function: processes a response to a request for a v3 hidden service
|
|
|
|
* descriptor.
|
|
|
|
**/
|
|
|
|
STATIC int
|
|
|
|
handle_response_fetch_hsdesc_v3(dir_connection_t *conn,
|
|
|
|
const response_handler_args_t *args)
|
|
|
|
{
|
|
|
|
const int status_code = args->status_code;
|
|
|
|
const char *reason = args->reason;
|
|
|
|
const char *body = args->body;
|
|
|
|
const size_t body_len = args->body_len;
|
|
|
|
|
|
|
|
tor_assert(conn->hs_ident);
|
|
|
|
|
|
|
|
log_info(LD_REND,"Received v3 hsdesc (body size %d, status %d (%s))",
|
|
|
|
(int)body_len, status_code, escaped(reason));
|
|
|
|
|
|
|
|
switch (status_code) {
|
|
|
|
case 200:
|
|
|
|
/* We got something: Try storing it in the cache. */
|
|
|
|
if (hs_cache_store_as_client(body, &conn->hs_ident->identity_pk) < 0) {
|
|
|
|
log_warn(LD_REND, "Failed to store hidden service descriptor");
|
2017-11-10 20:01:33 +01:00
|
|
|
/* Fire control port FAILED event. */
|
|
|
|
hs_control_desc_event_failed(conn->hs_ident, conn->identity_digest,
|
|
|
|
"BAD_DESC");
|
2017-11-14 17:06:35 +01:00
|
|
|
hs_control_desc_event_content(conn->hs_ident, conn->identity_digest,
|
|
|
|
NULL);
|
2017-06-01 13:25:46 +02:00
|
|
|
} else {
|
|
|
|
log_info(LD_REND, "Stored hidden service descriptor successfully.");
|
2017-06-27 15:46:16 +02:00
|
|
|
TO_CONN(conn)->purpose = DIR_PURPOSE_HAS_FETCHED_HSDESC;
|
2017-07-21 20:32:47 +02:00
|
|
|
hs_client_desc_has_arrived(conn->hs_ident);
|
2017-11-10 20:12:34 +01:00
|
|
|
/* Fire control port RECEIVED event. */
|
|
|
|
hs_control_desc_event_received(conn->hs_ident, conn->identity_digest);
|
2017-11-14 17:06:35 +01:00
|
|
|
hs_control_desc_event_content(conn->hs_ident, conn->identity_digest,
|
|
|
|
body);
|
2017-06-01 13:25:46 +02:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 404:
|
|
|
|
/* Not there. We'll retry when connection_about_to_close_connection()
|
|
|
|
* tries to clean this conn up. */
|
|
|
|
log_info(LD_REND, "Fetching hidden service v3 descriptor not found: "
|
|
|
|
"Retrying at another directory.");
|
2017-11-10 20:01:33 +01:00
|
|
|
/* Fire control port FAILED event. */
|
|
|
|
hs_control_desc_event_failed(conn->hs_ident, conn->identity_digest,
|
|
|
|
"NOT_FOUND");
|
2017-11-14 17:06:35 +01:00
|
|
|
hs_control_desc_event_content(conn->hs_ident, conn->identity_digest,
|
|
|
|
NULL);
|
2017-06-01 13:25:46 +02:00
|
|
|
break;
|
|
|
|
case 400:
|
|
|
|
log_warn(LD_REND, "Fetching v3 hidden service descriptor failed: "
|
|
|
|
"http status 400 (%s). Dirserver didn't like our "
|
|
|
|
"query? Retrying at another directory.",
|
|
|
|
escaped(reason));
|
2017-11-10 20:01:33 +01:00
|
|
|
/* Fire control port FAILED event. */
|
|
|
|
hs_control_desc_event_failed(conn->hs_ident, conn->identity_digest,
|
|
|
|
"QUERY_REJECTED");
|
2017-11-14 17:06:35 +01:00
|
|
|
hs_control_desc_event_content(conn->hs_ident, conn->identity_digest,
|
|
|
|
NULL);
|
2017-06-01 13:25:46 +02:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
log_warn(LD_REND, "Fetching v3 hidden service descriptor failed: "
|
|
|
|
"http status %d (%s) response unexpected from HSDir server "
|
|
|
|
"'%s:%d'. Retrying at another directory.",
|
|
|
|
status_code, escaped(reason), TO_CONN(conn)->address,
|
|
|
|
TO_CONN(conn)->port);
|
2017-11-10 20:01:33 +01:00
|
|
|
/* Fire control port FAILED event. */
|
|
|
|
hs_control_desc_event_failed(conn->hs_ident, conn->identity_digest,
|
|
|
|
"UNEXPECTED");
|
2017-11-14 17:06:35 +01:00
|
|
|
hs_control_desc_event_content(conn->hs_ident, conn->identity_digest,
|
|
|
|
NULL);
|
2017-06-01 13:25:46 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-05-02 19:06:25 +02:00
|
|
|
/**
|
|
|
|
* Handler function: processes a response to a request for a v2 hidden service
|
|
|
|
* descriptor.
|
|
|
|
**/
|
|
|
|
static int
|
|
|
|
handle_response_fetch_renddesc_v2(dir_connection_t *conn,
|
2017-05-02 19:11:44 +02:00
|
|
|
const response_handler_args_t *args)
|
2017-05-02 19:06:25 +02:00
|
|
|
{
|
|
|
|
tor_assert(conn->base_.purpose == DIR_PURPOSE_FETCH_RENDDESC_V2);
|
|
|
|
const int status_code = args->status_code;
|
|
|
|
const char *reason = args->reason;
|
|
|
|
const char *body = args->body;
|
|
|
|
const size_t body_len = args->body_len;
|
|
|
|
|
2017-05-02 19:11:44 +02:00
|
|
|
#define SEND_HS_DESC_FAILED_EVENT(reason) \
|
2017-11-10 17:25:16 +01:00
|
|
|
(control_event_hsv2_descriptor_failed(conn->rend_data, \
|
|
|
|
conn->identity_digest, \
|
|
|
|
reason))
|
2017-05-02 19:11:44 +02:00
|
|
|
#define SEND_HS_DESC_FAILED_CONTENT() \
|
|
|
|
(control_event_hs_descriptor_content( \
|
|
|
|
rend_data_get_address(conn->rend_data), \
|
|
|
|
conn->requested_resource, \
|
|
|
|
conn->identity_digest, \
|
|
|
|
NULL))
|
|
|
|
|
|
|
|
tor_assert(conn->rend_data);
|
|
|
|
log_info(LD_REND,"Received rendezvous descriptor (body size %d, status %d "
|
|
|
|
"(%s))",
|
|
|
|
(int)body_len, status_code, escaped(reason));
|
|
|
|
switch (status_code) {
|
|
|
|
case 200:
|
|
|
|
{
|
|
|
|
rend_cache_entry_t *entry = NULL;
|
|
|
|
|
|
|
|
if (rend_cache_store_v2_desc_as_client(body,
|
|
|
|
conn->requested_resource,
|
|
|
|
conn->rend_data, &entry) < 0) {
|
|
|
|
log_warn(LD_REND,"Fetching v2 rendezvous descriptor failed. "
|
|
|
|
"Retrying at another directory.");
|
|
|
|
/* We'll retry when connection_about_to_close_connection()
|
|
|
|
* cleans this dir conn up. */
|
|
|
|
SEND_HS_DESC_FAILED_EVENT("BAD_DESC");
|
2015-02-24 22:17:14 +01:00
|
|
|
SEND_HS_DESC_FAILED_CONTENT();
|
2017-05-02 19:11:44 +02:00
|
|
|
} else {
|
|
|
|
char service_id[REND_SERVICE_ID_LEN_BASE32 + 1];
|
|
|
|
/* Should never be NULL here if we found the descriptor. */
|
|
|
|
tor_assert(entry);
|
|
|
|
rend_get_service_id(entry->parsed->pk, service_id);
|
|
|
|
|
|
|
|
/* success. notify pending connections about this. */
|
|
|
|
log_info(LD_REND, "Successfully fetched v2 rendezvous "
|
|
|
|
"descriptor.");
|
2017-11-10 17:25:16 +01:00
|
|
|
control_event_hsv2_descriptor_received(service_id,
|
|
|
|
conn->rend_data,
|
|
|
|
conn->identity_digest);
|
2017-05-02 19:11:44 +02:00
|
|
|
control_event_hs_descriptor_content(service_id,
|
|
|
|
conn->requested_resource,
|
|
|
|
conn->identity_digest,
|
|
|
|
body);
|
|
|
|
conn->base_.purpose = DIR_PURPOSE_HAS_FETCHED_RENDDESC_V2;
|
|
|
|
rend_client_desc_trynow(service_id);
|
|
|
|
memwipe(service_id, 0, sizeof(service_id));
|
|
|
|
}
|
|
|
|
break;
|
2007-10-29 20:10:42 +01:00
|
|
|
}
|
2017-05-02 19:11:44 +02:00
|
|
|
case 404:
|
|
|
|
/* Not there. We'll retry when
|
|
|
|
* connection_about_to_close_connection() cleans this conn up. */
|
|
|
|
log_info(LD_REND,"Fetching v2 rendezvous descriptor failed: "
|
|
|
|
"Retrying at another directory.");
|
|
|
|
SEND_HS_DESC_FAILED_EVENT("NOT_FOUND");
|
|
|
|
SEND_HS_DESC_FAILED_CONTENT();
|
|
|
|
break;
|
|
|
|
case 400:
|
|
|
|
log_warn(LD_REND, "Fetching v2 rendezvous descriptor failed: "
|
|
|
|
"http status 400 (%s). Dirserver didn't like our "
|
|
|
|
"v2 rendezvous query? Retrying at another directory.",
|
|
|
|
escaped(reason));
|
|
|
|
SEND_HS_DESC_FAILED_EVENT("QUERY_REJECTED");
|
|
|
|
SEND_HS_DESC_FAILED_CONTENT();
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
log_warn(LD_REND, "Fetching v2 rendezvous descriptor failed: "
|
|
|
|
"http status %d (%s) response unexpected while "
|
|
|
|
"fetching v2 hidden service descriptor (server '%s:%d'). "
|
|
|
|
"Retrying at another directory.",
|
|
|
|
status_code, escaped(reason), conn->base_.address,
|
|
|
|
conn->base_.port);
|
|
|
|
SEND_HS_DESC_FAILED_EVENT("UNEXPECTED");
|
|
|
|
SEND_HS_DESC_FAILED_CONTENT();
|
|
|
|
break;
|
|
|
|
}
|
2007-10-29 20:10:42 +01:00
|
|
|
|
2017-05-02 19:06:25 +02:00
|
|
|
return 0;
|
|
|
|
}

/**
 * Handler function: processes a response to a POST request to upload a v2
 * hidden service descriptor.
 **/
static int
handle_response_upload_renddesc_v2(dir_connection_t *conn,
                                   const response_handler_args_t *args)
{
  tor_assert(conn->base_.purpose == DIR_PURPOSE_UPLOAD_RENDDESC_V2);
  const int status_code = args->status_code;
  const char *reason = args->reason;

#define SEND_HS_DESC_UPLOAD_FAILED_EVENT(reason)                        \
  (control_event_hs_descriptor_upload_failed(                           \
                              conn->identity_digest,                    \
                              rend_data_get_address(conn->rend_data),   \
                              reason))

  log_info(LD_REND,"Uploaded rendezvous descriptor (status %d "
           "(%s))",
           status_code, escaped(reason));
  /* Without the rend data, we'll have a problem identifying what has been
   * uploaded for which service. */
  tor_assert(conn->rend_data);
  switch (status_code) {
    case 200:
      log_info(LD_REND,
               "Uploading rendezvous descriptor: finished with status "
               "200 (%s)", escaped(reason));
      control_event_hs_descriptor_uploaded(conn->identity_digest,
                                    rend_data_get_address(conn->rend_data));
      rend_service_desc_has_uploaded(conn->rend_data);
      break;
    case 400:
      log_warn(LD_REND,"http status 400 (%s) response from dirserver "
               "'%s:%d'. Malformed rendezvous descriptor?",
               escaped(reason), conn->base_.address, conn->base_.port);
      SEND_HS_DESC_UPLOAD_FAILED_EVENT("UPLOAD_REJECTED");
      break;
    default:
      log_warn(LD_REND,"http status %d (%s) response unexpected (server "
               "'%s:%d').",
               status_code, escaped(reason), conn->base_.address,
               conn->base_.port);
      SEND_HS_DESC_UPLOAD_FAILED_EVENT("UNEXPECTED");
      break;
  }

  return 0;
}

/**
 * Handler function: processes a response to a POST request to upload a
 * hidden service descriptor.
 **/
static int
handle_response_upload_hsdesc(dir_connection_t *conn,
                              const response_handler_args_t *args)
{
  const int status_code = args->status_code;
  const char *reason = args->reason;

  tor_assert(conn);
  tor_assert(conn->base_.purpose == DIR_PURPOSE_UPLOAD_HSDESC);

  log_info(LD_REND, "Uploaded hidden service descriptor (status %d "
           "(%s))",
           status_code, escaped(reason));
  /* For this directory response, we MUST have a hidden service identifier on
   * this connection. */
  tor_assert(conn->hs_ident);
  switch (status_code) {
    case 200:
      log_info(LD_REND, "Uploading hidden service descriptor: "
               "finished with status 200 (%s)", escaped(reason));
      hs_control_desc_event_uploaded(conn->hs_ident, conn->identity_digest);
      break;
    case 400:
      log_fn(LOG_PROTOCOL_WARN, LD_REND,
             "Uploading hidden service descriptor: http "
             "status 400 (%s) response from dirserver "
             "'%s:%d'. Malformed hidden service descriptor?",
             escaped(reason), conn->base_.address, conn->base_.port);
      hs_control_desc_event_failed(conn->hs_ident, conn->identity_digest,
                                   "UPLOAD_REJECTED");
      break;
    default:
      log_warn(LD_REND, "Uploading hidden service descriptor: http "
               "status %d (%s) response unexpected (server "
               "'%s:%d').",
               status_code, escaped(reason), conn->base_.address,
               conn->base_.port);
      hs_control_desc_event_failed(conn->hs_ident, conn->identity_digest,
                                   "UNEXPECTED");
      break;
  }

  return 0;
}

/** Called when a directory connection reaches EOF. */
int
connection_dir_reached_eof(dir_connection_t *conn)
{
  int retval;
  if (conn->base_.state != DIR_CONN_STATE_CLIENT_READING) {
    log_info(LD_HTTP,"conn reached eof, not reading. [state=%d] Closing.",
             conn->base_.state);
    connection_close_immediate(TO_CONN(conn)); /* error: give up on flushing */
    connection_mark_for_close(TO_CONN(conn));
    return -1;
  }

  retval = connection_dir_client_reached_eof(conn);
  if (retval == 0) /* success */
    conn->base_.state = DIR_CONN_STATE_CLIENT_FINISHED;
  connection_mark_for_close(TO_CONN(conn));
  return retval;
}

/** If any directory object is arriving, and it's over 10MB large, we're
 * getting DoS'd.  (As of 0.1.2.x, raw directories are about 1MB, and we never
 * ask for more than 96 router descriptors at a time.)
 */
#define MAX_DIRECTORY_OBJECT_SIZE (10*(1<<20))

#define MAX_VOTE_DL_SIZE (MAX_DIRECTORY_OBJECT_SIZE * 5)
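
/* (The 5x multiplier above allows ~50 MiB for status-vote downloads;
 * presumably a single vote together with its signatures can be much larger
 * than other directory objects.) */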

/** Read handler for directory connections.  (That's connections <em>to</em>
 * directory servers and connections <em>at</em> directory servers.)
 */
int
connection_dir_process_inbuf(dir_connection_t *conn)
{
  size_t max_size;
  tor_assert(conn);
  tor_assert(conn->base_.type == CONN_TYPE_DIR);

  /* Directory clients write, then read data until they receive EOF;
   * directory servers read data until they get an HTTP command, then
   * write their response (when it's finished flushing, they mark for
   * close).
   */

  /* If we're on the dirserver side, look for a command. */
  if (conn->base_.state == DIR_CONN_STATE_SERVER_COMMAND_WAIT) {
    if (directory_handle_command(conn) < 0) {
      connection_mark_for_close(TO_CONN(conn));
      return -1;
    }
    return 0;
  }

  max_size =
    (TO_CONN(conn)->purpose == DIR_PURPOSE_FETCH_STATUS_VOTE) ?
    MAX_VOTE_DL_SIZE : MAX_DIRECTORY_OBJECT_SIZE;

  if (connection_get_inbuf_len(TO_CONN(conn)) > max_size) {
    log_warn(LD_HTTP,
             "Too much data received from directory connection (%s): "
             "denial of service attempt, or you need to upgrade?",
             conn->base_.address);
    connection_mark_for_close(TO_CONN(conn));
    return -1;
  }

  if (!conn->base_.inbuf_reached_eof)
    log_debug(LD_HTTP,"Got data, not eof. Leaving on inbuf.");
  return 0;
}

/** We are closing a dir connection: If <b>dir_conn</b> is a dir connection
 *  that tried to fetch an HS descriptor, check if it successfully fetched it,
 *  or if we need to try again. */
static void
refetch_hsdesc_if_needed(dir_connection_t *dir_conn)
{
  connection_t *conn = TO_CONN(dir_conn);

  /* If we were trying to fetch a v2 rend desc and did not succeed, retry as
   * needed. (If a fetch is successful, the connection state is changed to
   * DIR_PURPOSE_HAS_FETCHED_RENDDESC_V2 or DIR_PURPOSE_HAS_FETCHED_HSDESC to
   * mark that refetching is unnecessary.) */
  if (conn->purpose == DIR_PURPOSE_FETCH_RENDDESC_V2 &&
      dir_conn->rend_data &&
      rend_valid_v2_service_id(
           rend_data_get_address(dir_conn->rend_data))) {
    rend_client_refetch_v2_renddesc(dir_conn->rend_data);
  }

  /* Check for v3 rend desc fetch */
  if (conn->purpose == DIR_PURPOSE_FETCH_HSDESC &&
      dir_conn->hs_ident &&
      !ed25519_public_key_is_zero(&dir_conn->hs_ident->identity_pk)) {
    hs_client_refetch_hsdesc(&dir_conn->hs_ident->identity_pk);
  }
}

/** Called when we're about to finally unlink and free a directory connection:
 * perform necessary accounting and cleanup */
void
connection_dir_about_to_close(dir_connection_t *dir_conn)
{
  connection_t *conn = TO_CONN(dir_conn);

  if (conn->state < DIR_CONN_STATE_CLIENT_FINISHED) {
    /* It's a directory connection and connecting or fetching
     * failed: forget about this router, and maybe try again. */
    connection_dir_request_failed(dir_conn);
  }

  refetch_hsdesc_if_needed(dir_conn);
}

/** Create an http response for the client <b>conn</b> out of
 * <b>status</b> and <b>reason_phrase</b>. Write it to <b>conn</b>.
 */
static void
write_short_http_response(dir_connection_t *conn, int status,
                          const char *reason_phrase)
{
  char *buf = NULL;
  char *datestring = NULL;

  IF_BUG_ONCE(!reason_phrase) { /* bullet-proofing */
    reason_phrase = "unspecified";
  }

  if (server_mode(get_options())) {
    /* include the Date: header, but only if we're a relay or bridge */
    char datebuf[RFC1123_TIME_LEN+1];
    format_rfc1123_time(datebuf, time(NULL));
    tor_asprintf(&datestring, "Date: %s\r\n", datebuf);
  }

  tor_asprintf(&buf, "HTTP/1.0 %d %s\r\n%s\r\n",
               status, reason_phrase, datestring?datestring:"");

  log_debug(LD_DIRSERV,"Wrote status 'HTTP/1.0 %d %s'", status, reason_phrase);
  connection_buf_add(buf, strlen(buf), TO_CONN(conn));

  tor_free(datestring);
  tor_free(buf);
}
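
/* Usage note for write_short_http_response(): a call such as
 * write_short_http_response(conn, 404, "Not found") queues
 * "HTTP/1.0 404 Not found\r\n\r\n" on the connection, with a "Date:" header
 * inserted before the blank line when we are running as a relay or bridge. */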

/** Write the header for an HTTP/1.0 response onto <b>conn</b>-\>outbuf,
 * with <b>type</b> as the Content-Type.
 *
 * If <b>length</b> is nonnegative, it is the Content-Length.
 * If <b>encoding</b> is provided, it is the Content-Encoding.
 * If <b>cache_lifetime</b> is greater than 0, the content may be cached for
 * up to cache_lifetime seconds. Otherwise, the content may not be cached. */
static void
write_http_response_header_impl(dir_connection_t *conn, ssize_t length,
                                const char *type, const char *encoding,
                                const char *extra_headers,
                                long cache_lifetime)
{
  char date[RFC1123_TIME_LEN+1];
  time_t now = time(NULL);
  buf_t *buf = buf_new_with_capacity(1024);

  tor_assert(conn);

  format_rfc1123_time(date, now);

  buf_add_printf(buf, "HTTP/1.0 200 OK\r\nDate: %s\r\n", date);
  if (type) {
    buf_add_printf(buf, "Content-Type: %s\r\n", type);
  }
  if (!is_local_addr(&conn->base_.addr)) {
    /* Don't report the source address for a nearby/private connection.
     * Otherwise we tend to mis-report in cases where incoming ports are
     * being forwarded to a Tor server running behind the firewall. */
    buf_add_printf(buf, X_ADDRESS_HEADER "%s\r\n", conn->base_.address);
  }
  if (encoding) {
    buf_add_printf(buf, "Content-Encoding: %s\r\n", encoding);
  }
  if (length >= 0) {
    buf_add_printf(buf, "Content-Length: %ld\r\n", (long)length);
  }
  if (cache_lifetime > 0) {
    char expbuf[RFC1123_TIME_LEN+1];
    format_rfc1123_time(expbuf, (time_t)(now + cache_lifetime));
    /* We could say 'Cache-control: max-age=%d' here if we start doing
     * http/1.1 */
    buf_add_printf(buf, "Expires: %s\r\n", expbuf);
  } else if (cache_lifetime == 0) {
    /* We could say 'Cache-control: no-cache' here if we start doing
     * http/1.1 */
    buf_add_string(buf, "Pragma: no-cache\r\n");
  }
  if (extra_headers) {
    buf_add_string(buf, extra_headers);
  }
  buf_add_string(buf, "\r\n");

  connection_buf_add_buf(TO_CONN(conn), buf);
  buf_free(buf);
}

/** As write_http_response_header_impl, but sets encoding and content-type
 * based on whether the response will be <b>compressed</b> or not. */
static void
write_http_response_headers(dir_connection_t *conn, ssize_t length,
                            compress_method_t method,
                            const char *extra_headers, long cache_lifetime)
{
  const char *methodname = compression_method_get_name(method);
  const char *doctype;
  if (method == NO_METHOD)
    doctype = "text/plain";
  else
    doctype = "application/octet-stream";
  write_http_response_header_impl(conn, length,
                                  doctype,
                                  methodname,
                                  extra_headers,
                                  cache_lifetime);
}

/** As write_http_response_headers, but assumes extra_headers is NULL */
static void
write_http_response_header(dir_connection_t *conn, ssize_t length,
                           compress_method_t method,
                           long cache_lifetime)
{
  write_http_response_headers(conn, length, method, NULL, cache_lifetime);
}

/** Array of compression methods to use (if supported) for serving
 * precompressed data, ordered from best to worst. */
static compress_method_t srv_meth_pref_precompressed[] = {
  LZMA_METHOD,
  ZSTD_METHOD,
  ZLIB_METHOD,
  GZIP_METHOD,
  NO_METHOD
};

/** Array of compression methods to use (if supported) for serving
 * streamed data, ordered from best to worst. */
static compress_method_t srv_meth_pref_streaming_compression[] = {
  ZSTD_METHOD,
  ZLIB_METHOD,
  GZIP_METHOD,
  NO_METHOD
};

/** Array of allowed compression methods to use (if supported) when receiving
 * a response from a request that was required to be anonymous. */
static compress_method_t client_meth_allowed_anonymous_compression[] = {
  ZLIB_METHOD,
  GZIP_METHOD,
  NO_METHOD
};
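
/* Note: LZMA appears only in the precompressed list above, not in the
 * streaming list, likely because LZMA compression is too expensive to run on
 * the fly; streamed responses fall back to zstd/zlib/gzip instead. */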

/** Parse the compression methods listed in an Accept-Encoding header
 * <b>h</b>, and convert them to a bitfield where compression method x is
 * supported if and only if 1 << x is set in the bitfield. */
STATIC unsigned
parse_accept_encoding_header(const char *h)
{
  unsigned result = (1u << NO_METHOD);
  smartlist_t *methods = smartlist_new();
  smartlist_split_string(methods, h, ",",
             SPLIT_SKIP_SPACE|SPLIT_STRIP_SPACE|SPLIT_IGNORE_BLANK, 0);

  SMARTLIST_FOREACH_BEGIN(methods, const char *, m) {
    compress_method_t method = compression_method_get_by_name(m);
    if (method != UNKNOWN_METHOD) {
      tor_assert(((unsigned)method) < 8*sizeof(unsigned));
      result |= (1u << method);
    }
  } SMARTLIST_FOREACH_END(m);
  SMARTLIST_FOREACH_BEGIN(methods, char *, m) {
    tor_free(m);
  } SMARTLIST_FOREACH_END(m);
  smartlist_free(methods);
  return result;
}
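
/* Usage note for parse_accept_encoding_header(): for example, a header value
 * of "deflate, gzip" sets the bits for those two methods (as resolved by
 * compression_method_get_by_name()), in addition to the NO_METHOD bit, which
 * is always set so that an uncompressed reply remains acceptable. */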

/** Array of compression methods to use (if supported) for requesting
 * compressed data, ordered from best to worst. */
static compress_method_t client_meth_pref[] = {
  LZMA_METHOD,
  ZSTD_METHOD,
  ZLIB_METHOD,
  GZIP_METHOD,
  NO_METHOD
};

/** Return a newly allocated string containing a comma separated list of
 * supported encodings. */
STATIC char *
accept_encoding_header(void)
{
  smartlist_t *methods = smartlist_new();
  char *header = NULL;
  compress_method_t method;
  unsigned i;

  for (i = 0; i < ARRAY_LENGTH(client_meth_pref); ++i) {
    method = client_meth_pref[i];
    if (tor_compress_supports_method(method))
      smartlist_add(methods, (char *)compression_method_get_name(method));
  }

  header = smartlist_join_strings(methods, ", ", 0, NULL);
  smartlist_free(methods);

  return header;
}
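
/* Usage note for accept_encoding_header(): the result is suitable as the
 * value of an Accept-Encoding request header; its exact contents depend on
 * which compression backends this build supports, with names taken from
 * compression_method_get_name(). The caller must tor_free() the string. */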

/** Decide whether a client would accept the consensus we have.
 *
 * Clients can say they only want a consensus if it's signed by more
 * than half the authorities in a list.  They pass this list in
 * the url as "...consensus/<b>fpr</b>+<b>fpr</b>+<b>fpr</b>".
 *
 * <b>fpr</b> may be an abbreviated fingerprint, i.e. only a left substring
 * of the full authority identity digest. (Only strings of even length,
 * i.e. encodings of full bytes, are handled correctly.  In the case
 * of an odd number of hex digits the last one is silently ignored.)
 *
 * Returns 1 if more than half of the requested authorities signed the
 * consensus, 0 otherwise.
 */
int
client_likes_consensus(const struct consensus_cache_entry_t *ent,
                       const char *want_url)
{
  smartlist_t *voters = smartlist_new();
  int need_at_least;
  int have = 0;

  if (consensus_cache_entry_get_voter_id_digests(ent, voters) != 0) {
    return 1; // We don't know the voters; assume the client won't mind.
  }

  smartlist_t *want_authorities = smartlist_new();
  dir_split_resource_into_fingerprints(want_url, want_authorities, NULL, 0);
  need_at_least = smartlist_len(want_authorities)/2+1;

  SMARTLIST_FOREACH_BEGIN(want_authorities, const char *, want_digest) {

    SMARTLIST_FOREACH_BEGIN(voters, const char *, digest) {
      if (!strcasecmpstart(digest, want_digest)) {
        have++;
        break;
      }
    } SMARTLIST_FOREACH_END(digest);

    /* early exit, if we already have enough */
    if (have >= need_at_least)
      break;
  } SMARTLIST_FOREACH_END(want_digest);

  SMARTLIST_FOREACH(want_authorities, char *, d, tor_free(d));
  smartlist_free(want_authorities);
  SMARTLIST_FOREACH(voters, char *, cp, tor_free(cp));
  smartlist_free(voters);
  return (have >= need_at_least);
}
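
/* Usage note for client_likes_consensus(): for a URL listing three
 * fingerprints, need_at_least works out to 3/2+1 == 2, so at least two of
 * the named authorities must appear among the consensus signers for the
 * client to be satisfied. */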

/** Return the compression level we should use for sending a compressed
 * response of size <b>n_bytes</b>. */
STATIC compression_level_t
choose_compression_level(ssize_t n_bytes)
{
  if (! have_been_under_memory_pressure()) {
    return HIGH_COMPRESSION; /* we have plenty of RAM. */
  } else if (n_bytes < 0) {
    return HIGH_COMPRESSION; /* unknown; might be big. */
  } else if (n_bytes < 1024) {
    return LOW_COMPRESSION;
  } else if (n_bytes < 2048) {
    return MEDIUM_COMPRESSION;
  } else {
    return HIGH_COMPRESSION;
  }
}
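
/* Note on choose_compression_level(): the byte-count thresholds only matter
 * once we have been under memory pressure; until then every response is
 * compressed at HIGH_COMPRESSION regardless of its size. */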

/** Information passed to handle a GET request. */
typedef struct get_handler_args_t {
  /** Bitmask of compression methods that the client said (or implied) it
   * supported. */
  unsigned compression_supported;
  /** If nonzero, the time included an if-modified-since header with this
   * value. */
  time_t if_modified_since;
  /** String containing the requested URL or resource. */
  const char *url;
  /** String containing the HTTP headers */
  const char *headers;
} get_handler_args_t;

/** Entry for handling an HTTP GET request.
 *
 * This entry matches a request if "string" is equal to the requested
 * resource, or if "is_prefix" is true and "string" is a prefix of the
 * requested resource.
 *
 * The 'handler' function is called to handle the request.  It receives
 * an arguments structure, and must return 0 on success or -1 if we should
 * close the connection.
 **/
typedef struct url_table_ent_s {
  const char *string;
  int is_prefix;
  int (*handler)(dir_connection_t *conn, const get_handler_args_t *args);
} url_table_ent_t;

static int handle_get_frontpage(dir_connection_t *conn,
                                const get_handler_args_t *args);
static int handle_get_current_consensus(dir_connection_t *conn,
                                const get_handler_args_t *args);
static int handle_get_status_vote(dir_connection_t *conn,
                                const get_handler_args_t *args);
static int handle_get_microdesc(dir_connection_t *conn,
                                const get_handler_args_t *args);
static int handle_get_descriptor(dir_connection_t *conn,
                                const get_handler_args_t *args);
static int handle_get_keys(dir_connection_t *conn,
                                const get_handler_args_t *args);
static int handle_get_hs_descriptor_v2(dir_connection_t *conn,
                                       const get_handler_args_t *args);
static int handle_get_robots(dir_connection_t *conn,
                                const get_handler_args_t *args);
static int handle_get_networkstatus_bridges(dir_connection_t *conn,
                                const get_handler_args_t *args);

/** Table for handling GET requests. */
static const url_table_ent_t url_table[] = {
  { "/tor/", 0, handle_get_frontpage },
  { "/tor/status-vote/current/consensus", 1, handle_get_current_consensus },
  { "/tor/status-vote/current/", 1, handle_get_status_vote },
  { "/tor/status-vote/next/", 1, handle_get_status_vote },
  { "/tor/micro/d/", 1, handle_get_microdesc },
  { "/tor/server/", 1, handle_get_descriptor },
  { "/tor/extra/", 1, handle_get_descriptor },
  { "/tor/keys/", 1, handle_get_keys },
  { "/tor/rendezvous2/", 1, handle_get_hs_descriptor_v2 },
  { "/tor/hs/3/", 1, handle_get_hs_descriptor_v3 },
  { "/tor/robots.txt", 0, handle_get_robots },
  { "/tor/networkstatus-bridges", 0, handle_get_networkstatus_bridges },
  { NULL, 0, NULL },
};
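
/* Note on url_table: directory_handle_command_get() scans it in order and
 * dispatches to the first entry whose string matches the request (exactly,
 * or as a prefix when is_prefix is set), so more specific prefixes must be
 * listed before less specific ones. */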

/** Helper function: called when a dirserver gets a complete HTTP GET
 * request.  Look for a request for a directory or for a rendezvous
 * service descriptor.  On finding one, write a response into
 * conn-\>outbuf.  If the request is unrecognized, send a 404.
 * Return 0 if we handled this successfully, or -1 if we need to close
 * the connection. */
MOCK_IMPL(STATIC int,
directory_handle_command_get,(dir_connection_t *conn, const char *headers,
                              const char *req_body, size_t req_body_len))
{
  char *url, *url_mem, *header;
  time_t if_modified_since = 0;
  int zlib_compressed_in_url;
  unsigned compression_methods_supported;

  /* We ignore the body of a GET request. */
  (void)req_body;
  (void)req_body_len;

  log_debug(LD_DIRSERV,"Received GET command.");

  conn->base_.state = DIR_CONN_STATE_SERVER_WRITING;

  if (parse_http_url(headers, &url) < 0) {
    write_short_http_response(conn, 400, "Bad request");
    return 0;
  }
  if ((header = http_get_header(headers, "If-Modified-Since: "))) {
    struct tm tm;
    if (parse_http_time(header, &tm) == 0) {
      if (tor_timegm(&tm, &if_modified_since)<0) {
        if_modified_since = 0;
      } else {
        log_debug(LD_DIRSERV, "If-Modified-Since is '%s'.", escaped(header));
      }
    }
    /* The correct behavior on a malformed If-Modified-Since header is to
     * act as if no If-Modified-Since header had been given. */
    tor_free(header);
  }
  log_debug(LD_DIRSERV,"rewritten url as '%s'.", escaped(url));

  url_mem = url;
  {
    size_t url_len = strlen(url);

    zlib_compressed_in_url = url_len > 2 && !strcmp(url+url_len-2, ".z");
    if (zlib_compressed_in_url) {
      url[url_len-2] = '\0';
    }
  }

  if ((header = http_get_header(headers, "Accept-Encoding: "))) {
    compression_methods_supported = parse_accept_encoding_header(header);
    tor_free(header);
  } else {
    compression_methods_supported = (1u << NO_METHOD);
  }
  if (zlib_compressed_in_url) {
    compression_methods_supported |= (1u << ZLIB_METHOD);
  }

  /* Remove all methods that we and the client don't both support. */
  compression_methods_supported &= tor_compress_get_supported_method_bitmask();

  get_handler_args_t args;
  args.url = url;
  args.headers = headers;
  args.if_modified_since = if_modified_since;
  args.compression_supported = compression_methods_supported;

  int i, result = -1;
  for (i = 0; url_table[i].string; ++i) {
    int match;
    if (url_table[i].is_prefix) {
      match = !strcmpstart(url, url_table[i].string);
    } else {
      match = !strcmp(url, url_table[i].string);
    }
    if (match) {
      result = url_table[i].handler(conn, &args);
      goto done;
    }
  }

  /* we didn't recognize the url */
  write_short_http_response(conn, 404, "Not found");
  result = 0;

 done:
  tor_free(url_mem);
  return result;
}
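
/* Note on the ".z" URL suffix stripped in directory_handle_command_get():
 * it is the legacy way for a client to request a zlib/deflate-compressed
 * reply, and is treated as if the client had also sent
 * "Accept-Encoding: deflate". */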

/** Helper function for GET / or GET /tor/
 */
static int
handle_get_frontpage(dir_connection_t *conn, const get_handler_args_t *args)
{
  (void) args; /* unused */
  const char *frontpage = get_dirportfrontpage();

  if (frontpage) {
    size_t dlen;
    dlen = strlen(frontpage);
    /* Let's return a disclaimer page (users shouldn't use V1 anymore,
       and caches don't fetch '/', so this is safe). */

    /* [We don't check for write_bucket_low here, since we want to serve
     *  this page no matter what.] */
    write_http_response_header_impl(conn, dlen, "text/html", "identity",
                                    NULL, DIRPORTFRONTPAGE_CACHE_LIFETIME);
    connection_buf_add(frontpage, dlen, TO_CONN(conn));
  } else {
    write_short_http_response(conn, 404, "Not found");
  }
  return 0;
}

/** Warn that the cached consensus <b>consensus</b> of type
 * <b>flavor</b> is too old and will not be served to clients. Rate-limit the
 * warning to avoid logging an entry on every request.
 */
static void
warn_consensus_is_too_old(const struct consensus_cache_entry_t *consensus,
                          const char *flavor, time_t now)
{
#define TOO_OLD_WARNING_INTERVAL (60*60)
  static ratelim_t warned = RATELIM_INIT(TOO_OLD_WARNING_INTERVAL);
  char timestamp[ISO_TIME_LEN+1];
  time_t valid_until;
  char *dupes;

  if (consensus_cache_entry_get_valid_until(consensus, &valid_until))
    return;

  if ((dupes = rate_limit_log(&warned, now))) {
    format_local_iso_time(timestamp, valid_until);
    log_warn(LD_DIRSERV, "Our %s%sconsensus is too old, so we will not "
             "serve it to clients. It was valid until %s local time and we "
             "continued to serve it for up to 24 hours after it expired.%s",
             flavor ? flavor : "", flavor ? " " : "", timestamp, dupes);
    tor_free(dupes);
  }
}

/**
 * Parse a single hex-encoded sha3-256 digest from <b>hex</b> into
 * <b>digest</b>. Return 0 on success.  On failure, report that the hash came
 * from <b>location</b>, report that we are taking <b>action</b> with it, and
 * return -1.
 */
static int
parse_one_diff_hash(uint8_t *digest, const char *hex, const char *location,
                    const char *action)
{
  if (base16_decode((char*)digest, DIGEST256_LEN, hex, strlen(hex)) ==
      DIGEST256_LEN) {
    return 0;
  } else {
    log_fn(LOG_PROTOCOL_WARN, LD_DIR,
           "%s contained bogus digest %s; %s.",
           location, escaped(hex), action);
    return -1;
  }
}

/** If there is an X-Or-Diff-From-Consensus header included in <b>headers</b>,
 * set <b>digests_out</b> to a new smartlist containing every 256-bit
 * hex-encoded digest listed in that header and return 0.  Otherwise return
 * -1. */
static int
parse_or_diff_from_header(smartlist_t **digests_out, const char *headers)
{
  char *hdr = http_get_header(headers, X_OR_DIFF_FROM_CONSENSUS_HEADER);
  if (hdr == NULL) {
    return -1;
  }
  smartlist_t *hex_digests = smartlist_new();
  *digests_out = smartlist_new();
  smartlist_split_string(hex_digests, hdr, " ",
                         SPLIT_SKIP_SPACE|SPLIT_IGNORE_BLANK, -1);
  SMARTLIST_FOREACH_BEGIN(hex_digests, const char *, hex) {
    uint8_t digest[DIGEST256_LEN];
    if (!parse_one_diff_hash(digest, hex, "X-Or-Diff-From-Consensus header",
                             "ignoring")) {
      smartlist_add(*digests_out, tor_memdup(digest, sizeof(digest)));
    }
  } SMARTLIST_FOREACH_END(hex);
  SMARTLIST_FOREACH(hex_digests, char *, cp, tor_free(cp));
  smartlist_free(hex_digests);
  tor_free(hdr);
  return 0;
}
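
/* Usage note for parse_or_diff_from_header(): a header such as
 * "X-Or-Diff-From-Consensus: <hex1> <hex2>" (space-separated hex sha3-256
 * digests) yields a two-element smartlist of 32-byte binary digests;
 * malformed entries are logged and skipped rather than treated as errors. */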

/** Fallback compression method.  The fallback compression method is used in
 * case a client requests a non-compressed document. We only store compressed
 * documents, so we use this compression method to fetch the document and let
 * the spooling system do the streaming decompression.
 */
#define FALLBACK_COMPRESS_METHOD ZLIB_METHOD

/**
 * Try to find the best consensus diff possible in order to serve a client
 * request for a diff from one of the consensuses in <b>digests</b> to the
 * current consensus of flavor <b>flav</b>.  The client supports the
 * compression methods listed in the <b>compression_methods</b> bitfield:
 * place the method chosen (if any) into <b>compression_used_out</b>.
 */
static struct consensus_cache_entry_t *
find_best_diff(const smartlist_t *digests, int flav,
               unsigned compression_methods,
               compress_method_t *compression_used_out)
{
  struct consensus_cache_entry_t *result = NULL;

  SMARTLIST_FOREACH_BEGIN(digests, const uint8_t *, diff_from) {
    unsigned u;
    for (u = 0; u < ARRAY_LENGTH(srv_meth_pref_precompressed); ++u) {
      compress_method_t method = srv_meth_pref_precompressed[u];
      if (0 == (compression_methods & (1u<<method)))
        continue; // client doesn't like this one, or we don't have it.
      if (consdiffmgr_find_diff_from(&result, flav, DIGEST_SHA3_256,
                                     diff_from, DIGEST256_LEN,
                                     method) == CONSDIFF_AVAILABLE) {
        tor_assert_nonfatal(result);
        *compression_used_out = method;
        return result;
      }
    }
  } SMARTLIST_FOREACH_END(diff_from);

  SMARTLIST_FOREACH_BEGIN(digests, const uint8_t *, diff_from) {
    if (consdiffmgr_find_diff_from(&result, flav, DIGEST_SHA3_256, diff_from,
          DIGEST256_LEN, FALLBACK_COMPRESS_METHOD) == CONSDIFF_AVAILABLE) {
      tor_assert_nonfatal(result);
      *compression_used_out = FALLBACK_COMPRESS_METHOD;
      return result;
    }
  } SMARTLIST_FOREACH_END(diff_from);

  return NULL;
}

/** Lookup the cached consensus document by the flavor found in <b>flav</b>.
 * The preferred set of compression methods should be listed in the
 * <b>compression_methods</b> bitfield. The compression method chosen (if any)
 * is stored in <b>compression_used_out</b>. */
static struct consensus_cache_entry_t *
find_best_consensus(int flav,
                    unsigned compression_methods,
                    compress_method_t *compression_used_out)
{
  struct consensus_cache_entry_t *result = NULL;
  unsigned u;

  for (u = 0; u < ARRAY_LENGTH(srv_meth_pref_precompressed); ++u) {
    compress_method_t method = srv_meth_pref_precompressed[u];

    if (0 == (compression_methods & (1u<<method)))
      continue;

    if (consdiffmgr_find_consensus(&result, flav,
                                   method) == CONSDIFF_AVAILABLE) {
      tor_assert_nonfatal(result);
      *compression_used_out = method;
      return result;
    }
  }

  if (consdiffmgr_find_consensus(&result, flav,
        FALLBACK_COMPRESS_METHOD) == CONSDIFF_AVAILABLE) {
    tor_assert_nonfatal(result);
    *compression_used_out = FALLBACK_COMPRESS_METHOD;
    return result;
  }

  return NULL;
}
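
/* Note: both find_best_diff() and find_best_consensus() fall back to the
 * zlib-compressed copy (FALLBACK_COMPRESS_METHOD) when none of the client's
 * acceptable formats is cached; the caller then streams a decompressed copy
 * to clients that only accept the identity encoding. */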

/** Try to find the best supported compression method possible from a given
 * <b>compression_methods</b>. Return NO_METHOD if no mutually supported
 * compression method could be found. */
static compress_method_t
find_best_compression_method(unsigned compression_methods, int stream)
{
  unsigned u;
  compress_method_t *methods;
  size_t length;

  if (stream) {
    methods = srv_meth_pref_streaming_compression;
    length = ARRAY_LENGTH(srv_meth_pref_streaming_compression);
  } else {
    methods = srv_meth_pref_precompressed;
    length = ARRAY_LENGTH(srv_meth_pref_precompressed);
  }

  for (u = 0; u < length; ++u) {
    compress_method_t method = methods[u];
    if (compression_methods & (1u<<method))
      return method;
  }

  return NO_METHOD;
}

/** Check if any of the digests in <b>digests</b> matches the latest consensus
 *  flavor (given in <b>flavor</b>) that we have available. */
static int
digest_list_contains_best_consensus(consensus_flavor_t flavor,
                                    const smartlist_t *digests)
{
  const networkstatus_t *ns = NULL;

  if (digests == NULL)
    return 0;

  ns = networkstatus_get_latest_consensus_by_flavor(flavor);

  if (ns == NULL)
    return 0;

  SMARTLIST_FOREACH_BEGIN(digests, const uint8_t *, digest) {
    if (tor_memeq(ns->digest_sha3_as_signed, digest, DIGEST256_LEN))
      return 1;
  } SMARTLIST_FOREACH_END(digest);

  return 0;
}

/** Check if the given compression method is allowed for a connection that is
 * supposed to be anonymous. Returns 1 if the compression method is allowed,
 * otherwise 0. */
STATIC int
allowed_anonymous_connection_compression_method(compress_method_t method)
{
  unsigned u;

  for (u = 0; u < ARRAY_LENGTH(client_meth_allowed_anonymous_compression);
       ++u) {
    compress_method_t allowed_method =
      client_meth_allowed_anonymous_compression[u];

    if (! tor_compress_supports_method(allowed_method))
      continue;

    if (method == allowed_method)
      return 1;
  }

  return 0;
}

/** Log a warning when a remote server has sent us a document using a
 * compression method that is not allowed for anonymous directory requests. */
STATIC void
warn_disallowed_anonymous_compression_method(compress_method_t method)
{
  log_fn(LOG_PROTOCOL_WARN, LD_HTTP,
         "Received a %s HTTP response, which is not "
         "allowed for anonymous directory requests.",
         compression_method_get_human_name(method));
}

/** Encodes the results of parsing a consensus request to figure out what
 * consensus, and possibly what diffs, the user asked for. */
typedef struct {
  /** name of the flavor to retrieve. */
  char *flavor;
  /** flavor to retrieve, as enum. */
  consensus_flavor_t flav;
  /** plus-separated list of authority fingerprints; see
   * client_likes_consensus(). Aliases the URL in the request passed to
   * parse_consensus_request(). */
  const char *want_fps;
  /** Optionally, a smartlist of sha3 digests-as-signed of the consensuses
   * to return a diff from. */
  smartlist_t *diff_from_digests;
  /** If true, never send a full consensus. If there is no diff, send
   * a 404 instead. */
  int diff_only;
} parsed_consensus_request_t;

/** Remove all data held in <b>req</b>. Do not free <b>req</b> itself, since
 * it is stack-allocated. */
static void
parsed_consensus_request_clear(parsed_consensus_request_t *req)
{
  if (!req)
    return;
  tor_free(req->flavor);
  if (req->diff_from_digests) {
    SMARTLIST_FOREACH(req->diff_from_digests, uint8_t *, d, tor_free(d));
    smartlist_free(req->diff_from_digests);
  }
  memset(req, 0, sizeof(parsed_consensus_request_t));
}

/**
 * Parse the URL and relevant headers of <b>args</b> for a current-consensus
 * request to learn what flavor of consensus we want, what keys it must be
 * signed with, and what diffs we would accept (or demand) instead. Return 0
 * on success and -1 on failure.
 */
static int
parse_consensus_request(parsed_consensus_request_t *out,
                        const get_handler_args_t *args)
{
  const char *url = args->url;
  memset(out, 0, sizeof(parsed_consensus_request_t));
  out->flav = FLAV_NS;

  const char CONSENSUS_URL_PREFIX[] = "/tor/status-vote/current/consensus/";
  const char CONSENSUS_FLAVORED_PREFIX[] =
    "/tor/status-vote/current/consensus-";

  /* figure out the flavor if any, and who we wanted to sign the thing */
  const char *after_flavor = NULL;

  if (!strcmpstart(url, CONSENSUS_FLAVORED_PREFIX)) {
    const char *f, *cp;
    f = url + strlen(CONSENSUS_FLAVORED_PREFIX);
    cp = strchr(f, '/');
    if (cp) {
      after_flavor = cp+1;
      out->flavor = tor_strndup(f, cp-f);
    } else {
      out->flavor = tor_strdup(f);
    }
    int flav = networkstatus_parse_flavor_name(out->flavor);
    if (flav < 0)
      flav = FLAV_NS;
    out->flav = flav;
  } else {
    if (!strcmpstart(url, CONSENSUS_URL_PREFIX))
      after_flavor = url+strlen(CONSENSUS_URL_PREFIX);
  }

  /* see whether we've been asked explicitly for a diff from an older
   * consensus. (The user might also have said that a diff would be okay,
   * via X-Or-Diff-From-Consensus.) */
  const char DIFF_COMPONENT[] = "diff/";
  char *diff_hash_in_url = NULL;
  if (after_flavor && !strcmpstart(after_flavor, DIFF_COMPONENT)) {
    after_flavor += strlen(DIFF_COMPONENT);
    const char *cp = strchr(after_flavor, '/');
    if (cp) {
      diff_hash_in_url = tor_strndup(after_flavor, cp-after_flavor);
      out->want_fps = cp+1;
    } else {
      diff_hash_in_url = tor_strdup(after_flavor);
      out->want_fps = NULL;
    }
  } else {
    out->want_fps = after_flavor;
  }

  if (diff_hash_in_url) {
    uint8_t diff_from[DIGEST256_LEN];
    out->diff_from_digests = smartlist_new();
    out->diff_only = 1;
    int ok = !parse_one_diff_hash(diff_from, diff_hash_in_url, "URL",
                                  "rejecting");
    tor_free(diff_hash_in_url);
    if (ok) {
      smartlist_add(out->diff_from_digests,
                    tor_memdup(diff_from, DIGEST256_LEN));
    } else {
      return -1;
    }
  } else {
    parse_or_diff_from_header(&out->diff_from_digests, args->headers);
  }

  return 0;
}
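
/* Illustrative example for parse_consensus_request(): a request for
 * "/tor/status-vote/current/consensus-microdesc/diff/<HEX>/<FPR1>+<FPR2>"
 * selects the "microdesc" flavor, asks for a diff from the consensus whose
 * sha3-as-signed digest is <HEX> (so diff_only is set), and leaves want_fps
 * pointing at "<FPR1>+<FPR2>". (The URL shown is illustrative.) */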

/** Helper function for GET /tor/status-vote/current/consensus
 */
static int
handle_get_current_consensus(dir_connection_t *conn,
                             const get_handler_args_t *args)
{
  const compress_method_t compress_method =
    find_best_compression_method(args->compression_supported, 0);
  const time_t if_modified_since = args->if_modified_since;
  int clear_spool = 0;

  /* v3 network status fetch. */
  long lifetime = NETWORKSTATUS_CACHE_LIFETIME;

  time_t now = time(NULL);
  parsed_consensus_request_t req;

  if (parse_consensus_request(&req, args) < 0) {
    write_short_http_response(conn, 404, "Couldn't parse request");
    goto done;
  }

  if (digest_list_contains_best_consensus(req.flav,
                                          req.diff_from_digests)) {
    write_short_http_response(conn, 304, "Not modified");
    geoip_note_ns_response(GEOIP_REJECT_NOT_MODIFIED);
    goto done;
  }

  struct consensus_cache_entry_t *cached_consensus = NULL;

  compress_method_t compression_used = NO_METHOD;
  if (req.diff_from_digests) {
    cached_consensus = find_best_diff(req.diff_from_digests, req.flav,
                                      args->compression_supported,
                                      &compression_used);
  }

  if (req.diff_only && !cached_consensus) {
    write_short_http_response(conn, 404, "No such diff available");
    // XXXX warn_consensus_is_too_old(v, req.flavor, now);
    geoip_note_ns_response(GEOIP_REJECT_NOT_FOUND);
    goto done;
  }

  if (! cached_consensus) {
    cached_consensus = find_best_consensus(req.flav,
                                           args->compression_supported,
                                           &compression_used);
  }

  time_t fresh_until, valid_until;
  int have_fresh_until = 0, have_valid_until = 0;
  if (cached_consensus) {
    have_fresh_until =
      !consensus_cache_entry_get_fresh_until(cached_consensus, &fresh_until);
    have_valid_until =
      !consensus_cache_entry_get_valid_until(cached_consensus, &valid_until);
  }

  if (cached_consensus && have_valid_until &&
      !networkstatus_valid_until_is_reasonably_live(valid_until, now)) {
    write_short_http_response(conn, 404, "Consensus is too old");
    warn_consensus_is_too_old(cached_consensus, req.flavor, now);
    geoip_note_ns_response(GEOIP_REJECT_NOT_FOUND);
    goto done;
  }

  if (cached_consensus && req.want_fps &&
      !client_likes_consensus(cached_consensus, req.want_fps)) {
    write_short_http_response(conn, 404, "Consensus not signed by sufficient "
                              "number of requested authorities");
    geoip_note_ns_response(GEOIP_REJECT_NOT_ENOUGH_SIGS);
    goto done;
  }

  conn->spool = smartlist_new();
  clear_spool = 1;
  {
    spooled_resource_t *spooled;
    if (cached_consensus) {
      spooled = spooled_resource_new_from_cache_entry(cached_consensus);
      smartlist_add(conn->spool, spooled);
    }
  }

  lifetime = (have_fresh_until && fresh_until > now) ? fresh_until - now : 0;

  size_t size_guess = 0;
  int n_expired = 0;
  dirserv_spool_remove_missing_and_guess_size(conn, if_modified_since,
                                              compress_method != NO_METHOD,
                                              &size_guess,
                                              &n_expired);

  if (!smartlist_len(conn->spool) && !n_expired) {
    write_short_http_response(conn, 404, "Not found");
    geoip_note_ns_response(GEOIP_REJECT_NOT_FOUND);
    goto done;
  } else if (!smartlist_len(conn->spool)) {
    write_short_http_response(conn, 304, "Not modified");
    geoip_note_ns_response(GEOIP_REJECT_NOT_MODIFIED);
    goto done;
  }

  if (global_write_bucket_low(TO_CONN(conn), size_guess, 2)) {
    log_debug(LD_DIRSERV,
              "Client asked for network status lists, but we've been "
              "writing too many bytes lately. Sending 503 Dir busy.");
    write_short_http_response(conn, 503, "Directory busy, try again later");
    geoip_note_ns_response(GEOIP_REJECT_BUSY);
    goto done;
  }

  tor_addr_t addr;
  if (tor_addr_parse(&addr, (TO_CONN(conn))->address) >= 0) {
    geoip_note_client_seen(GEOIP_CLIENT_NETWORKSTATUS,
                           &addr, NULL,
                           time(NULL));
    geoip_note_ns_response(GEOIP_SUCCESS);
    /* Note that a request for a network status has started, so that we
     * can measure the download time later on. */
    if (conn->dirreq_id)
      geoip_start_dirreq(conn->dirreq_id, size_guess, DIRREQ_TUNNELED);
    else
      geoip_start_dirreq(TO_CONN(conn)->global_identifier, size_guess,
                         DIRREQ_DIRECT);
  }

  /* Use this header to tell caches that the response depends on the
   * X-Or-Diff-From-Consensus header (or lack thereof). */
  const char vary_header[] = "Vary: X-Or-Diff-From-Consensus\r\n";

  clear_spool = 0;

  // The compress_method might have been NO_METHOD, but we store the data
  // compressed. Decompress it using `compression_used`. See fallback code in
  // find_best_consensus() and find_best_diff().
  write_http_response_headers(conn, -1,
                              compress_method == NO_METHOD ?
                                NO_METHOD : compression_used,
                              vary_header,
                              smartlist_len(conn->spool) == 1 ? lifetime : 0);

  if (compress_method == NO_METHOD && smartlist_len(conn->spool))
    conn->compress_state = tor_compress_new(0, compression_used,
                                            HIGH_COMPRESSION);

  /* Prime the connection with some data. */
  const int initial_flush_result = connection_dirserv_flushed_some(conn);
  tor_assert_nonfatal(initial_flush_result == 0);
  goto done;

 done:
  parsed_consensus_request_clear(&req);
  if (clear_spool) {
    dir_conn_clear_spool(conn);
  }
  return 0;
}

/** Helper function for GET /tor/status-vote/{current,next}/...
 */
static int
handle_get_status_vote(dir_connection_t *conn, const get_handler_args_t *args)
{
  const char *url = args->url;
  {
    int current;
    ssize_t body_len = 0;
    ssize_t estimated_len = 0;
    /* This smartlist holds strings that we can compress on the fly. */
    smartlist_t *items = smartlist_new();
    /* This smartlist holds cached_dir_t objects that have a precompressed
     * deflated version. */
    smartlist_t *dir_items = smartlist_new();
    int lifetime = 60; /* XXXX?? should actually use vote intervals. */
    url += strlen("/tor/status-vote/");
    current = !strcmpstart(url, "current/");
    url = strchr(url, '/');
    tor_assert(url);
    ++url;
    if (!strcmp(url, "consensus")) {
      const char *item;
      tor_assert(!current); /* we handle current consensus specially above,
                             * since it wants to be spooled. */
      if ((item = dirvote_get_pending_consensus(FLAV_NS)))
        smartlist_add(items, (char*)item);
    } else if (!current && !strcmp(url, "consensus-signatures")) {
      /* XXXX the spec says that we should implement
       * current/consensus-signatures too.  It doesn't seem to be needed,
       * though. */
      const char *item;
      if ((item=dirvote_get_pending_detached_signatures()))
        smartlist_add(items, (char*)item);
    } else if (!strcmp(url, "authority")) {
      const cached_dir_t *d;
      int flags = DGV_BY_ID |
        (current ? DGV_INCLUDE_PREVIOUS : DGV_INCLUDE_PENDING);
      if ((d=dirvote_get_vote(NULL, flags)))
        smartlist_add(dir_items, (cached_dir_t*)d);
    } else {
      const cached_dir_t *d;
      smartlist_t *fps = smartlist_new();
      int flags;
      if (!strcmpstart(url, "d/")) {
        url += 2;
        flags = DGV_INCLUDE_PENDING | DGV_INCLUDE_PREVIOUS;
      } else {
        flags = DGV_BY_ID |
          (current ? DGV_INCLUDE_PREVIOUS : DGV_INCLUDE_PENDING);
      }
      dir_split_resource_into_fingerprints(url, fps, NULL,
                                           DSR_HEX|DSR_SORT_UNIQ);
      SMARTLIST_FOREACH(fps, char *, fp, {
          if ((d = dirvote_get_vote(fp, flags)))
            smartlist_add(dir_items, (cached_dir_t*)d);
          tor_free(fp);
        });
      smartlist_free(fps);
    }
    if (!smartlist_len(dir_items) && !smartlist_len(items)) {
      write_short_http_response(conn, 404, "Not found");
      goto vote_done;
    }

    /* We're sending items from at most one kind of source */
    tor_assert_nonfatal(smartlist_len(items) == 0 ||
                        smartlist_len(dir_items) == 0);

    int streaming;
    unsigned mask;
    if (smartlist_len(items)) {
      /* We're taking strings and compressing them on the fly. */
      streaming = 1;
      mask = ~0u;
    } else {
      /* We're taking cached_dir_t objects. We only have them uncompressed
       * or deflated. */
      streaming = 0;
      mask = (1u<<NO_METHOD) | (1u<<ZLIB_METHOD);
    }
    const compress_method_t compress_method = find_best_compression_method(
                              args->compression_supported&mask, streaming);

    SMARTLIST_FOREACH(dir_items, cached_dir_t *, d,
                      body_len += compress_method != NO_METHOD ?
                        d->dir_compressed_len : d->dir_len);
    estimated_len += body_len;
    SMARTLIST_FOREACH(items, const char *, item, {
        size_t ln = strlen(item);
        if (compress_method != NO_METHOD) {
          estimated_len += ln/2;
        } else {
          body_len += ln; estimated_len += ln;
        }
      });

    if (global_write_bucket_low(TO_CONN(conn), estimated_len, 2)) {
      write_short_http_response(conn, 503, "Directory busy, try again later");
      goto vote_done;
    }
    write_http_response_header(conn, body_len ? body_len : -1,
                               compress_method,
                               lifetime);

    if (smartlist_len(items)) {
      if (compress_method != NO_METHOD) {
        conn->compress_state = tor_compress_new(1, compress_method,
                           choose_compression_level(estimated_len));
        SMARTLIST_FOREACH(items, const char *, c,
                 connection_buf_add_compress(c, strlen(c), conn, 0));
        connection_buf_add_compress("", 0, conn, 1);
      } else {
        SMARTLIST_FOREACH(items, const char *, c,
                         connection_buf_add(c, strlen(c), TO_CONN(conn)));
      }
    } else {
      SMARTLIST_FOREACH(dir_items, cached_dir_t *, d,
          connection_buf_add(compress_method != NO_METHOD ?
                               d->dir_compressed : d->dir,
                             compress_method != NO_METHOD ?
                               d->dir_compressed_len : d->dir_len,
                             TO_CONN(conn)));
    }
|
|
|
vote_done:
|
|
|
|
smartlist_free(items);
|
|
|
|
smartlist_free(dir_items);
|
2007-08-15 21:55:57 +02:00
|
|
|
goto done;
|
2007-08-15 21:55:52 +02:00
|
|
|
}
|
2016-05-10 19:48:17 +02:00
|
|
|
done:
|
|
|
|
return 0;
|
|
|
|
}
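/* Example request paths served by handle_get_status_vote() above. This is an
 * illustrative sketch derived from the URL parsing in that function; the
 * digest and fingerprint values are hypothetical placeholders.
 *
 *   GET /tor/status-vote/next/consensus            -> pending consensus
 *   GET /tor/status-vote/next/consensus-signatures -> pending detached sigs
 *   GET /tor/status-vote/current/authority         -> this authority's vote
 *   GET /tor/status-vote/current/d/<hex digest>    -> vote(s) by vote digest
 *   GET /tor/status-vote/current/<hex identity fp> -> vote(s) by identity
 */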
|
2007-08-15 21:55:52 +02:00
|
|
|
|
2016-05-10 19:48:17 +02:00
|
|
|
/** Helper function for GET /tor/micro/d/...
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
handle_get_microdesc(dir_connection_t *conn, const get_handler_args_t *args)
|
|
|
|
{
|
|
|
|
const char *url = args->url;
|
2017-05-11 03:32:51 +02:00
|
|
|
const compress_method_t compress_method =
|
2017-05-12 13:55:18 +02:00
|
|
|
find_best_compression_method(args->compression_supported, 1);
|
2017-03-13 20:38:20 +01:00
|
|
|
int clear_spool = 1;
|
2016-05-10 19:48:17 +02:00
|
|
|
{
|
2017-03-13 20:38:20 +01:00
|
|
|
conn->spool = smartlist_new();
|
2009-10-18 21:45:57 +02:00
|
|
|
|
2017-03-13 20:38:20 +01:00
|
|
|
dir_split_resource_into_spoolable(url+strlen("/tor/micro/d/"),
|
|
|
|
DIR_SPOOL_MICRODESC,
|
|
|
|
conn->spool, NULL,
|
2009-10-18 21:45:57 +02:00
|
|
|
DSR_DIGEST256|DSR_BASE64|DSR_SORT_UNIQ);
|
|
|
|
|
2017-03-13 20:38:20 +01:00
|
|
|
size_t size_guess = 0;
|
2017-05-11 03:32:51 +02:00
|
|
|
dirserv_spool_remove_missing_and_guess_size(conn, 0,
|
|
|
|
compress_method != NO_METHOD,
|
2017-03-13 20:38:20 +01:00
|
|
|
&size_guess, NULL);
|
|
|
|
if (smartlist_len(conn->spool) == 0) {
|
2017-09-14 14:39:08 +02:00
|
|
|
write_short_http_response(conn, 404, "Not found");
|
2009-10-18 21:45:57 +02:00
|
|
|
goto done;
|
|
|
|
}
|
2017-03-13 20:38:20 +01:00
|
|
|
if (global_write_bucket_low(TO_CONN(conn), size_guess, 2)) {
|
2009-10-18 21:45:57 +02:00
|
|
|
log_info(LD_DIRSERV,
|
|
|
|
"Client asked for server descriptors, but we've been "
|
|
|
|
"writing too many bytes lately. Sending 503 Dir busy.");
|
2017-09-14 14:39:08 +02:00
|
|
|
write_short_http_response(conn, 503, "Directory busy, try again later");
|
2009-10-18 21:45:57 +02:00
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
2017-03-13 20:38:20 +01:00
|
|
|
clear_spool = 0;
|
2017-04-28 20:42:22 +02:00
|
|
|
write_http_response_header(conn, -1,
|
2017-05-11 03:32:51 +02:00
|
|
|
compress_method,
|
2017-04-28 20:42:22 +02:00
|
|
|
MICRODESC_CACHE_LIFETIME);
|
2009-10-18 21:45:57 +02:00
|
|
|
|
2017-05-11 03:32:51 +02:00
|
|
|
if (compress_method != NO_METHOD)
|
|
|
|
conn->compress_state = tor_compress_new(1, compress_method,
|
2017-03-13 20:38:20 +01:00
|
|
|
choose_compression_level(size_guess));
|
2009-10-18 21:45:57 +02:00
|
|
|
|
2017-03-17 16:34:41 +01:00
|
|
|
const int initial_flush_result = connection_dirserv_flushed_some(conn);
|
|
|
|
tor_assert_nonfatal(initial_flush_result == 0);
|
2009-10-18 21:45:57 +02:00
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
2016-05-10 19:48:17 +02:00
|
|
|
done:
|
2017-03-13 20:38:20 +01:00
|
|
|
if (clear_spool) {
|
|
|
|
dir_conn_clear_spool(conn);
|
|
|
|
}
|
2016-05-10 19:48:17 +02:00
|
|
|
return 0;
|
|
|
|
}
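/* Example request for handle_get_microdesc() above (an illustrative sketch;
 * the digests are hypothetical placeholders). Microdescriptors are requested
 * by base64-encoded SHA256 digest, several per request:
 *
 *   GET /tor/micro/d/<base64 digest256>-<base64 digest256>
 *
 * The handler spools the matching cached microdescriptors and streams them
 * back, compressed when the client advertised a usable method. */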
|
|
|
|
|
|
|
|
/** Helper function for GET /tor/{server,extra}/...
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
handle_get_descriptor(dir_connection_t *conn, const get_handler_args_t *args)
|
|
|
|
{
|
|
|
|
const char *url = args->url;
|
2017-05-11 03:32:51 +02:00
|
|
|
const compress_method_t compress_method =
|
2017-05-12 13:55:18 +02:00
|
|
|
find_best_compression_method(args->compression_supported, 1);
|
2016-05-10 19:48:17 +02:00
|
|
|
const or_options_t *options = get_options();
|
2017-03-13 20:38:20 +01:00
|
|
|
int clear_spool = 1;
|
2007-04-16 20:39:39 +02:00
|
|
|
if (!strcmpstart(url,"/tor/server/") ||
|
2009-01-06 17:03:38 +01:00
|
|
|
(!options->BridgeAuthoritativeDir &&
|
|
|
|
!options->BridgeRelay && !strcmpstart(url,"/tor/extra/"))) {
|
2005-10-18 19:09:57 +02:00
|
|
|
int res;
|
2017-03-13 20:38:20 +01:00
|
|
|
const char *msg = NULL;
|
2006-10-20 01:05:34 +02:00
|
|
|
int cache_lifetime = 0;
|
2007-04-16 20:39:39 +02:00
|
|
|
int is_extra = !strcmpstart(url,"/tor/extra/");
|
|
|
|
url += is_extra ? strlen("/tor/extra/") : strlen("/tor/server/");
|
2017-03-13 20:38:20 +01:00
|
|
|
dir_spool_source_t source;
|
|
|
|
time_t publish_cutoff = 0;
|
|
|
|
if (!strcmpstart(url, "d/")) {
|
|
|
|
source =
|
2007-04-16 20:39:39 +02:00
|
|
|
is_extra ? DIR_SPOOL_EXTRA_BY_DIGEST : DIR_SPOOL_SERVER_BY_DIGEST;
|
2017-03-13 20:38:20 +01:00
|
|
|
} else {
|
|
|
|
source =
|
2007-04-16 20:39:39 +02:00
|
|
|
is_extra ? DIR_SPOOL_EXTRA_BY_FP : DIR_SPOOL_SERVER_BY_FP;
|
2017-03-13 20:38:20 +01:00
|
|
|
/* We only want to apply a publish cutoff when we're requesting
|
|
|
|
* resources by fingerprint. */
|
|
|
|
publish_cutoff = time(NULL) - ROUTER_MAX_AGE_TO_PUBLISH;
|
|
|
|
}
|
2007-08-14 22:19:58 +02:00
|
|
|
|
2017-03-13 20:38:20 +01:00
|
|
|
conn->spool = smartlist_new();
|
|
|
|
res = dirserv_get_routerdesc_spool(conn->spool, url,
|
|
|
|
source,
|
|
|
|
connection_dir_is_encrypted(conn),
|
|
|
|
&msg);
|
|
|
|
|
|
|
|
if (!strcmpstart(url, "all")) {
|
|
|
|
cache_lifetime = FULL_DIR_CACHE_LIFETIME;
|
|
|
|
} else if (smartlist_len(conn->spool) == 1) {
|
|
|
|
cache_lifetime = ROUTERDESC_BY_DIGEST_CACHE_LIFETIME;
|
2007-08-14 22:19:58 +02:00
|
|
|
}
|
|
|
|
|
2017-03-13 20:38:20 +01:00
|
|
|
size_t size_guess = 0;
|
|
|
|
int n_expired = 0;
|
|
|
|
dirserv_spool_remove_missing_and_guess_size(conn, publish_cutoff,
|
2017-05-11 03:32:51 +02:00
|
|
|
compress_method != NO_METHOD,
|
|
|
|
&size_guess, &n_expired);
|
2017-03-13 20:38:20 +01:00
|
|
|
|
|
|
|
/* If we are the bridge authority and the descriptor is a bridge
|
|
|
|
* descriptor, remember that we served this descriptor for desc stats. */
|
|
|
|
/* XXXX it's a bit of a kludge to have this here. */
|
|
|
|
if (get_options()->BridgeAuthoritativeDir &&
|
|
|
|
source == DIR_SPOOL_SERVER_BY_FP) {
|
|
|
|
SMARTLIST_FOREACH_BEGIN(conn->spool, spooled_resource_t *, spooled) {
|
|
|
|
const routerinfo_t *router =
|
|
|
|
router_get_by_id_digest((const char *)spooled->digest);
|
|
|
|
/* router can be NULL here when the bridge auth is asked for its own
|
|
|
|
* descriptor. */
|
|
|
|
if (router && router->purpose == ROUTER_PURPOSE_BRIDGE)
|
|
|
|
rep_hist_note_desc_served(router->cache_info.identity_digest);
|
|
|
|
} SMARTLIST_FOREACH_END(spooled);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (res < 0 || size_guess == 0 || smartlist_len(conn->spool) == 0) {
|
|
|
|
if (msg == NULL)
|
|
|
|
msg = "Not found";
|
2017-09-14 14:39:08 +02:00
|
|
|
write_short_http_response(conn, 404, msg);
|
2017-03-13 20:38:20 +01:00
|
|
|
} else {
|
|
|
|
if (global_write_bucket_low(TO_CONN(conn), size_guess, 2)) {
|
2007-01-05 07:03:10 +01:00
|
|
|
log_info(LD_DIRSERV,
|
|
|
|
"Client asked for server descriptors, but we've been "
|
|
|
|
"writing too many bytes lately. Sending 503 Dir busy.");
|
2017-09-14 14:39:08 +02:00
|
|
|
write_short_http_response(conn, 503,
|
|
|
|
"Directory busy, try again later");
|
2017-03-13 20:38:20 +01:00
|
|
|
dir_conn_clear_spool(conn);
|
2007-08-15 21:55:57 +02:00
|
|
|
goto done;
|
2007-01-05 07:03:10 +01:00
|
|
|
}
|
2017-05-11 03:32:51 +02:00
|
|
|
write_http_response_header(conn, -1, compress_method, cache_lifetime);
|
|
|
|
if (compress_method != NO_METHOD)
|
|
|
|
conn->compress_state = tor_compress_new(1, compress_method,
|
2017-03-13 20:38:20 +01:00
|
|
|
choose_compression_level(size_guess));
|
|
|
|
clear_spool = 0;
|
2006-06-18 09:38:55 +02:00
|
|
|
/* Prime the connection with some data. */
|
2017-03-17 16:34:41 +01:00
|
|
|
int initial_flush_result = connection_dirserv_flushed_some(conn);
|
|
|
|
tor_assert_nonfatal(initial_flush_result == 0);
|
2005-08-25 22:33:17 +02:00
|
|
|
}
|
2007-08-15 21:55:57 +02:00
|
|
|
goto done;
|
2005-08-25 22:33:17 +02:00
|
|
|
}
|
2016-05-10 19:48:17 +02:00
|
|
|
done:
|
2017-03-13 20:38:20 +01:00
|
|
|
if (clear_spool)
|
|
|
|
dir_conn_clear_spool(conn);
|
|
|
|
return 0;
|
2016-05-10 19:48:17 +02:00
|
|
|
}
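/* Example request paths for handle_get_descriptor() above (an illustrative
 * sketch; the hex values are hypothetical placeholders):
 *
 *   GET /tor/server/all                   -> all router descriptors we hold
 *   GET /tor/server/d/<hex digest>+...    -> descriptors by descriptor digest
 *   GET /tor/server/fp/<hex identity>+... -> descriptors by identity, with
 *                                            the publication cutoff applied
 *   GET /tor/extra/...                    -> same forms, for extra-info docs
 */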
|
2005-08-25 22:33:17 +02:00
|
|
|
|
2016-05-10 19:48:17 +02:00
|
|
|
/** Helper function for GET /tor/keys/...
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
handle_get_keys(dir_connection_t *conn, const get_handler_args_t *args)
|
|
|
|
{
|
|
|
|
const char *url = args->url;
|
2017-05-11 03:32:51 +02:00
|
|
|
const compress_method_t compress_method =
|
2017-05-12 13:55:18 +02:00
|
|
|
find_best_compression_method(args->compression_supported, 1);
|
2016-05-10 19:48:17 +02:00
|
|
|
const time_t if_modified_since = args->if_modified_since;
|
|
|
|
{
|
2012-01-18 21:53:30 +01:00
|
|
|
smartlist_t *certs = smartlist_new();
|
2007-08-14 23:09:30 +02:00
|
|
|
ssize_t len = -1;
|
2007-08-15 17:38:53 +02:00
|
|
|
if (!strcmp(url, "/tor/keys/all")) {
|
2008-01-27 00:18:30 +01:00
|
|
|
authority_cert_get_all(certs);
|
2007-08-15 17:38:53 +02:00
|
|
|
} else if (!strcmp(url, "/tor/keys/authority")) {
|
2007-08-14 23:09:30 +02:00
|
|
|
authority_cert_t *cert = get_my_v3_authority_cert();
|
|
|
|
if (cert)
|
|
|
|
smartlist_add(certs, cert);
|
2007-08-15 17:38:53 +02:00
|
|
|
} else if (!strcmpstart(url, "/tor/keys/fp/")) {
|
2012-01-18 21:53:30 +01:00
|
|
|
smartlist_t *fps = smartlist_new();
|
2007-08-15 17:38:53 +02:00
|
|
|
dir_split_resource_into_fingerprints(url+strlen("/tor/keys/fp/"),
|
2009-10-18 21:45:57 +02:00
|
|
|
fps, NULL,
|
|
|
|
DSR_HEX|DSR_SORT_UNIQ);
|
2007-08-14 23:09:30 +02:00
|
|
|
SMARTLIST_FOREACH(fps, char *, d, {
|
|
|
|
authority_cert_t *c = authority_cert_get_newest_by_id(d);
|
|
|
|
if (c) smartlist_add(certs, c);
|
|
|
|
tor_free(d);
|
|
|
|
});
|
|
|
|
smartlist_free(fps);
|
2007-08-15 17:38:53 +02:00
|
|
|
} else if (!strcmpstart(url, "/tor/keys/sk/")) {
|
2012-01-18 21:53:30 +01:00
|
|
|
smartlist_t *fps = smartlist_new();
|
2007-08-15 17:38:53 +02:00
|
|
|
dir_split_resource_into_fingerprints(url+strlen("/tor/keys/sk/"),
|
2009-10-18 21:45:57 +02:00
|
|
|
fps, NULL,
|
|
|
|
DSR_HEX|DSR_SORT_UNIQ);
|
2007-08-14 23:09:30 +02:00
|
|
|
SMARTLIST_FOREACH(fps, char *, d, {
|
|
|
|
authority_cert_t *c = authority_cert_get_by_sk_digest(d);
|
|
|
|
if (c) smartlist_add(certs, c);
|
|
|
|
tor_free(d);
|
|
|
|
});
|
|
|
|
smartlist_free(fps);
|
2008-12-12 20:05:36 +01:00
|
|
|
} else if (!strcmpstart(url, "/tor/keys/fp-sk/")) {
|
2012-01-18 21:53:30 +01:00
|
|
|
smartlist_t *fp_sks = smartlist_new();
|
2008-12-12 20:05:36 +01:00
|
|
|
dir_split_resource_into_fingerprint_pairs(url+strlen("/tor/keys/fp-sk/"),
|
|
|
|
fp_sks);
|
|
|
|
SMARTLIST_FOREACH(fp_sks, fp_pair_t *, pair, {
|
|
|
|
authority_cert_t *c = authority_cert_get_by_digests(pair->first,
|
|
|
|
pair->second);
|
|
|
|
if (c) smartlist_add(certs, c);
|
|
|
|
tor_free(pair);
|
|
|
|
});
|
|
|
|
smartlist_free(fp_sks);
|
2007-08-14 23:09:30 +02:00
|
|
|
} else {
|
2017-09-14 14:39:08 +02:00
|
|
|
write_short_http_response(conn, 400, "Bad request");
|
2007-10-10 22:06:38 +02:00
|
|
|
goto keys_done;
|
2007-08-14 23:09:30 +02:00
|
|
|
}
|
|
|
|
if (!smartlist_len(certs)) {
|
2017-09-14 14:39:08 +02:00
|
|
|
write_short_http_response(conn, 404, "Not found");
|
2007-10-10 22:06:38 +02:00
|
|
|
goto keys_done;
|
2007-08-14 23:09:30 +02:00
|
|
|
}
|
2007-10-10 22:28:01 +02:00
|
|
|
SMARTLIST_FOREACH(certs, authority_cert_t *, c,
|
2007-10-10 22:29:52 +02:00
|
|
|
if (c->cache_info.published_on < if_modified_since)
|
2007-10-10 22:28:01 +02:00
|
|
|
SMARTLIST_DEL_CURRENT(certs, c));
|
|
|
|
if (!smartlist_len(certs)) {
|
2017-09-14 14:39:08 +02:00
|
|
|
write_short_http_response(conn, 304, "Not modified");
|
2007-10-10 22:28:01 +02:00
|
|
|
goto keys_done;
|
|
|
|
}
|
2007-10-10 22:06:38 +02:00
|
|
|
len = 0;
|
|
|
|
SMARTLIST_FOREACH(certs, authority_cert_t *, c,
|
|
|
|
len += c->cache_info.signed_descriptor_len);
|
|
|
|
|
2017-05-11 03:32:51 +02:00
|
|
|
if (global_write_bucket_low(TO_CONN(conn),
|
|
|
|
compress_method != NO_METHOD ? len/2 : len,
|
|
|
|
2)) {
|
2017-09-14 14:39:08 +02:00
|
|
|
write_short_http_response(conn, 503, "Directory busy, try again later");
|
2007-10-10 22:06:38 +02:00
|
|
|
goto keys_done;
|
2007-08-14 23:09:30 +02:00
|
|
|
}
|
2007-10-10 22:06:38 +02:00
|
|
|
|
2017-05-11 03:32:51 +02:00
|
|
|
write_http_response_header(conn,
|
|
|
|
compress_method != NO_METHOD ? -1 : len,
|
|
|
|
compress_method,
|
2017-04-28 20:42:22 +02:00
|
|
|
60*60);
|
2017-05-11 03:32:51 +02:00
|
|
|
if (compress_method != NO_METHOD) {
|
|
|
|
conn->compress_state = tor_compress_new(1, compress_method,
|
2017-04-18 01:10:25 +02:00
|
|
|
choose_compression_level(len));
|
2007-08-14 23:09:30 +02:00
|
|
|
SMARTLIST_FOREACH(certs, authority_cert_t *, c,
|
2017-08-08 21:16:39 +02:00
|
|
|
connection_buf_add_compress(
|
2017-04-18 01:23:39 +02:00
|
|
|
c->cache_info.signed_descriptor_body,
|
|
|
|
c->cache_info.signed_descriptor_len,
|
|
|
|
conn, 0));
|
2017-08-08 21:16:39 +02:00
|
|
|
connection_buf_add_compress("", 0, conn, 1);
|
2007-08-14 23:09:30 +02:00
|
|
|
} else {
|
|
|
|
SMARTLIST_FOREACH(certs, authority_cert_t *, c,
|
2017-08-08 21:16:39 +02:00
|
|
|
connection_buf_add(c->cache_info.signed_descriptor_body,
|
2007-08-14 23:09:30 +02:00
|
|
|
c->cache_info.signed_descriptor_len,
|
|
|
|
TO_CONN(conn)));
|
|
|
|
}
|
2007-10-10 22:06:38 +02:00
|
|
|
keys_done:
|
2007-08-14 23:09:30 +02:00
|
|
|
smartlist_free(certs);
|
2007-08-15 21:55:57 +02:00
|
|
|
goto done;
|
2007-08-14 23:09:30 +02:00
|
|
|
}
|
2016-05-10 19:48:17 +02:00
|
|
|
done:
|
|
|
|
return 0;
|
|
|
|
}
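/* Example request paths for handle_get_keys() above (an illustrative sketch;
 * the hex digests are hypothetical placeholders):
 *
 *   GET /tor/keys/all                     -> every authority certificate known
 *   GET /tor/keys/authority               -> this authority's own certificate
 *   GET /tor/keys/fp/<hex id>+<hex id>    -> newest cert for each identity
 *   GET /tor/keys/sk/<hex signing key>    -> certs by signing-key digest
 *   GET /tor/keys/fp-sk/<hex id>-<hex sk> -> certs by (identity, signing key)
 *
 * Certificates older than the If-Modified-Since header are filtered out,
 * yielding a 304 when nothing newer remains. */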
|
2007-08-14 23:09:30 +02:00
|
|
|
|
2016-05-10 19:48:17 +02:00
|
|
|
/** Helper function for GET /tor/rendezvous2/
|
|
|
|
*/
|
|
|
|
static int
|
2016-08-25 17:04:59 +02:00
|
|
|
handle_get_hs_descriptor_v2(dir_connection_t *conn,
|
|
|
|
const get_handler_args_t *args)
|
2016-05-10 19:48:17 +02:00
|
|
|
{
|
|
|
|
const char *url = args->url;
|
|
|
|
if (connection_dir_is_encrypted(conn)) {
|
2007-10-29 20:10:42 +01:00
|
|
|
/* Handle v2 rendezvous descriptor fetch request. */
|
2007-10-29 20:10:47 +01:00
|
|
|
const char *descp;
|
2007-10-29 20:10:42 +01:00
|
|
|
const char *query = url + strlen("/tor/rendezvous2/");
|
2015-04-21 20:04:39 +02:00
|
|
|
if (rend_valid_descriptor_id(query)) {
|
2007-10-29 20:10:42 +01:00
|
|
|
log_info(LD_REND, "Got a v2 rendezvous descriptor request for ID '%s'",
|
2014-09-09 16:22:01 +02:00
|
|
|
safe_str(escaped(query)));
|
2007-10-31 21:48:06 +01:00
|
|
|
switch (rend_cache_lookup_v2_desc_as_dir(query, &descp)) {
|
2007-10-29 20:10:42 +01:00
|
|
|
case 1: /* valid */
|
2017-04-28 20:42:22 +02:00
|
|
|
write_http_response_header(conn, strlen(descp), NO_METHOD, 0);
|
2017-08-08 21:16:39 +02:00
|
|
|
connection_buf_add(descp, strlen(descp), TO_CONN(conn));
|
2007-10-29 20:10:42 +01:00
|
|
|
break;
|
|
|
|
case 0: /* well-formed but not present */
|
2017-09-14 14:39:08 +02:00
|
|
|
write_short_http_response(conn, 404, "Not found");
|
2007-10-29 20:10:42 +01:00
|
|
|
break;
|
|
|
|
case -1: /* not well-formed */
|
2017-09-14 14:39:08 +02:00
|
|
|
write_short_http_response(conn, 400, "Bad request");
|
2007-10-29 20:10:42 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
} else { /* not well-formed */
|
2017-09-14 14:39:08 +02:00
|
|
|
write_short_http_response(conn, 400, "Bad request");
|
2007-10-29 20:10:42 +01:00
|
|
|
}
|
|
|
|
goto done;
|
2016-05-10 19:48:17 +02:00
|
|
|
} else {
|
|
|
|
/* Not encrypted! */
|
2017-09-14 14:39:08 +02:00
|
|
|
write_short_http_response(conn, 404, "Not found");
|
2007-10-29 20:10:42 +01:00
|
|
|
}
|
2016-05-10 19:48:17 +02:00
|
|
|
done:
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-08-25 17:04:59 +02:00
|
|
|
/** Helper function for GET /tor/hs/3/<z>. Only for version 3.
|
|
|
|
*/
|
|
|
|
STATIC int
|
|
|
|
handle_get_hs_descriptor_v3(dir_connection_t *conn,
|
|
|
|
const get_handler_args_t *args)
|
|
|
|
{
|
|
|
|
int retval;
|
2016-09-15 20:13:18 +02:00
|
|
|
const char *desc_str = NULL;
|
2016-08-25 17:04:59 +02:00
|
|
|
const char *pubkey_str = NULL;
|
|
|
|
const char *url = args->url;
|
|
|
|
|
|
|
|
/* Reject unencrypted dir connections */
|
|
|
|
if (!connection_dir_is_encrypted(conn)) {
|
2017-09-14 14:39:08 +02:00
|
|
|
write_short_http_response(conn, 404, "Not found");
|
2016-08-25 17:04:59 +02:00
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* After the path prefix follows the base64 encoded blinded pubkey which we
|
|
|
|
* use to get the descriptor from the cache. Skip the prefix and get the
|
|
|
|
* pubkey. */
|
|
|
|
tor_assert(!strcmpstart(url, "/tor/hs/3/"));
|
|
|
|
pubkey_str = url + strlen("/tor/hs/3/");
|
|
|
|
retval = hs_cache_lookup_as_dir(HS_VERSION_THREE,
|
|
|
|
pubkey_str, &desc_str);
|
2017-02-15 16:27:32 +01:00
|
|
|
if (retval <= 0 || desc_str == NULL) {
|
2017-09-14 14:39:08 +02:00
|
|
|
write_short_http_response(conn, 404, "Not found");
|
2016-08-25 17:04:59 +02:00
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Found requested descriptor! Pass it to this nice client. */
|
2017-04-28 20:42:22 +02:00
|
|
|
write_http_response_header(conn, strlen(desc_str), NO_METHOD, 0);
|
2017-08-08 21:16:39 +02:00
|
|
|
connection_buf_add(desc_str, strlen(desc_str), TO_CONN(conn));
|
2016-08-25 17:04:59 +02:00
|
|
|
|
|
|
|
done:
|
|
|
|
return 0;
|
|
|
|
}
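/* Illustrative example for handle_get_hs_descriptor_v3() above (the key below
 * is a hypothetical placeholder). A v3 onion-service descriptor is fetched
 * over an encrypted (begindir) connection by its blinded public key,
 * base64-encoded in the URL:
 *
 *   GET /tor/hs/3/<base64 blinded pubkey>
 *
 * The handler strips the "/tor/hs/3/" prefix and hands the remainder to
 * hs_cache_lookup_as_dir(HS_VERSION_THREE, ...), answering 404 when no
 * matching descriptor is cached or the connection is not encrypted. */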
|
|
|
|
|
2016-05-10 19:48:17 +02:00
|
|
|
/** Helper function for GET /tor/networkstatus-bridges
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
handle_get_networkstatus_bridges(dir_connection_t *conn,
|
|
|
|
const get_handler_args_t *args)
|
|
|
|
{
|
|
|
|
const char *headers = args->headers;
|
2007-10-29 20:10:42 +01:00
|
|
|
|
2016-05-10 19:48:17 +02:00
|
|
|
const or_options_t *options = get_options();
|
2007-12-22 11:54:21 +01:00
|
|
|
if (options->BridgeAuthoritativeDir &&
|
2012-10-12 18:22:13 +02:00
|
|
|
options->BridgePassword_AuthDigest_ &&
|
2016-05-10 19:48:17 +02:00
|
|
|
connection_dir_is_encrypted(conn)) {
|
2007-12-22 11:54:21 +01:00
|
|
|
char *status;
|
2012-04-01 04:51:28 +02:00
|
|
|
char digest[DIGEST256_LEN];
|
2007-12-22 11:54:21 +01:00
|
|
|
|
2016-05-10 19:48:17 +02:00
|
|
|
char *header = http_get_header(headers, "Authorization: Basic ");
|
2012-04-01 04:51:28 +02:00
|
|
|
if (header)
|
|
|
|
crypto_digest256(digest, header, strlen(header), DIGEST_SHA256);
|
2007-12-22 11:54:21 +01:00
|
|
|
|
2008-02-20 03:05:28 +01:00
|
|
|
/* now make sure the password is there and right */
|
2012-04-01 04:51:28 +02:00
|
|
|
if (!header ||
|
2012-04-01 21:59:38 +02:00
|
|
|
tor_memneq(digest,
|
2012-10-12 18:22:13 +02:00
|
|
|
options->BridgePassword_AuthDigest_, DIGEST256_LEN)) {
|
2017-09-14 14:39:08 +02:00
|
|
|
write_short_http_response(conn, 404, "Not found");
|
2007-12-22 11:54:21 +01:00
|
|
|
tor_free(header);
|
|
|
|
goto done;
|
|
|
|
}
|
2008-02-20 03:05:28 +01:00
|
|
|
tor_free(header);
|
2007-12-22 11:54:21 +01:00
|
|
|
|
|
|
|
/* all happy now. send an answer. */
|
|
|
|
status = networkstatus_getinfo_by_purpose("bridge", time(NULL));
|
2016-05-10 19:48:17 +02:00
|
|
|
size_t dlen = strlen(status);
|
2017-04-28 20:42:22 +02:00
|
|
|
write_http_response_header(conn, dlen, NO_METHOD, 0);
|
2017-08-08 21:16:39 +02:00
|
|
|
connection_buf_add(status, dlen, TO_CONN(conn));
|
2007-12-22 11:54:21 +01:00
|
|
|
tor_free(status);
|
|
|
|
goto done;
|
|
|
|
}
|
2016-05-10 19:48:17 +02:00
|
|
|
done:
|
|
|
|
return 0;
|
|
|
|
}
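/* Illustrative example request for handle_get_networkstatus_bridges() above
 * (the credential is a hypothetical placeholder). The request must arrive
 * over an encrypted connection to the bridge authority, and the SHA256
 * digest of everything after "Authorization: Basic " must match the
 * precomputed BridgePassword_AuthDigest_; otherwise the handler answers 404:
 *
 *   GET /tor/networkstatus-bridges
 *   Authorization: Basic <base64 credential>
 */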
|
2007-12-22 11:54:21 +01:00
|
|
|
|
2016-05-10 19:48:17 +02:00
|
|
|
/** Helper function for GET robots.txt or /tor/robots.txt */
|
|
|
|
static int
|
|
|
|
handle_get_robots(dir_connection_t *conn, const get_handler_args_t *args)
|
|
|
|
{
|
|
|
|
(void)args;
|
|
|
|
{
|
|
|
|
const char robots[] = "User-agent: *\r\nDisallow: /\r\n";
|
2006-03-27 09:33:13 +02:00
|
|
|
size_t len = strlen(robots);
|
2017-04-28 20:42:22 +02:00
|
|
|
write_http_response_header(conn, len, NO_METHOD, ROBOTS_CACHE_LIFETIME);
|
2017-08-08 21:16:39 +02:00
|
|
|
connection_buf_add(robots, len, TO_CONN(conn));
|
2006-06-16 00:52:56 +02:00
|
|
|
}
|
2003-12-17 10:42:28 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-08-11 21:21:54 +02:00
|
|
|
/* Given the <b>url</b> from a POST request, try to extract the version number
|
|
|
|
* using the provided <b>prefix</b>. The version should be after the prefix and
|
2018-01-24 09:55:15 +01:00
|
|
|
 * end with the separator "/". For instance:
|
2016-08-11 21:21:54 +02:00
|
|
|
* /tor/hs/3/publish
|
|
|
|
*
|
|
|
|
* On success, <b>end_pos</b> points to the position right after the version
|
|
|
|
* was found. On error, it is set to NULL.
|
|
|
|
*
|
|
|
|
 * Return the version on success, or a negative value on error. */
|
|
|
|
STATIC int
|
|
|
|
parse_hs_version_from_post(const char *url, const char *prefix,
|
|
|
|
const char **end_pos)
|
|
|
|
{
|
|
|
|
int ok;
|
|
|
|
unsigned long version;
|
|
|
|
const char *start;
|
|
|
|
char *end = NULL;
|
|
|
|
|
|
|
|
tor_assert(url);
|
|
|
|
tor_assert(prefix);
|
|
|
|
tor_assert(end_pos);
|
|
|
|
|
|
|
|
/* Check if the prefix does start the url. */
|
|
|
|
if (strcmpstart(url, prefix)) {
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
/* Move pointer to the end of the prefix string. */
|
|
|
|
start = url + strlen(prefix);
|
|
|
|
/* Try to parse this as the HS version; on success, <b>end</b> will point
|
|
|
|
 * just past the version digits, at the separator. */
|
|
|
|
version = tor_parse_long(start, 10, 0, INT_MAX, &ok, &end);
|
|
|
|
if (!ok) {
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
*end_pos = end;
|
|
|
|
return (int) version;
|
|
|
|
err:
|
|
|
|
*end_pos = NULL;
|
|
|
|
return -1;
|
|
|
|
}
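/* Illustrative usage of parse_hs_version_from_post() above (a sketch, not
 * part of the original file):
 *
 *   const char *end_pos = NULL;
 *   int v = parse_hs_version_from_post("/tor/hs/3/publish", "/tor/hs/",
 *                                      &end_pos);
 *   // v == 3, end_pos now points at "/publish"
 *
 *   v = parse_hs_version_from_post("/tor/hs/bogus", "/tor/hs/", &end_pos);
 *   // v == -1, end_pos == NULL
 */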
|
|
|
|
|
|
|
|
/* Handle the POST request for a hidden service descriptor. The request is in
|
|
|
|
* <b>url</b>, the body of the request is in <b>body</b>. Return 200 on success
|
|
|
|
 * or 400 to indicate a bad request. */
|
2016-08-25 17:11:23 +02:00
|
|
|
STATIC int
|
2016-08-11 21:21:54 +02:00
|
|
|
handle_post_hs_descriptor(const char *url, const char *body)
|
|
|
|
{
|
|
|
|
int version;
|
|
|
|
const char *end_pos;
|
|
|
|
|
|
|
|
tor_assert(url);
|
|
|
|
tor_assert(body);
|
|
|
|
|
|
|
|
version = parse_hs_version_from_post(url, "/tor/hs/", &end_pos);
|
|
|
|
if (version < 0) {
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* We have a valid version number, now make sure it's a publish request. Use
|
|
|
|
* the end position just after the version and check for the command. */
|
|
|
|
if (strcmpstart(end_pos, "/publish")) {
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (version) {
|
|
|
|
case HS_VERSION_THREE:
|
|
|
|
if (hs_cache_store_as_dir(body) < 0) {
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
log_info(LD_REND, "Publish request for HS descriptor handled "
|
|
|
|
"successfully.");
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
/* Unsupported version, return a bad request. */
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 200;
|
|
|
|
err:
|
|
|
|
/* Bad request. */
|
|
|
|
return 400;
|
|
|
|
}
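/* Illustrative flow for handle_post_hs_descriptor() above (a sketch; the
 * descriptor body is a hypothetical placeholder). A POST to
 * "/tor/hs/3/publish" whose body is a well-formed v3 descriptor yields 200;
 * a bad version, a non-publish command, or a rejected descriptor yields 400:
 *
 *   int code = handle_post_hs_descriptor("/tor/hs/3/publish", desc_body);
 *   // code == 200 if hs_cache_store_as_dir(desc_body) succeeded, else 400
 */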
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** Helper function: called when a dirserver gets a complete HTTP POST
|
2004-05-06 13:08:04 +02:00
|
|
|
* request. Look for an uploaded server descriptor or rendezvous
|
|
|
|
* service descriptor. On finding one, process it and write a
|
2004-05-10 06:34:48 +02:00
|
|
|
* response into conn-\>outbuf. If the request is unrecognized, send a
|
2004-09-27 05:39:30 +02:00
|
|
|
* 400. Always return 0. */
|
2016-12-12 12:53:11 +01:00
|
|
|
MOCK_IMPL(STATIC int,
|
|
|
|
directory_handle_command_post,(dir_connection_t *conn, const char *headers,
|
|
|
|
const char *body, size_t body_len))
|
2004-05-13 01:48:57 +02:00
|
|
|
{
|
2005-08-24 00:27:17 +02:00
|
|
|
char *url = NULL;
|
2011-06-14 19:01:38 +02:00
|
|
|
const or_options_t *options = get_options();
|
2003-12-17 10:42:28 +01:00
|
|
|
|
2006-02-13 10:37:53 +01:00
|
|
|
log_debug(LD_DIRSERV,"Received POST command.");
|
2004-03-31 00:57:49 +02:00
|
|
|
|
2012-10-12 18:22:13 +02:00
|
|
|
conn->base_.state = DIR_CONN_STATE_SERVER_WRITING;
|
2004-03-31 00:57:49 +02:00
|
|
|
|
2016-03-16 21:46:14 +01:00
|
|
|
if (!public_server_mode(options)) {
|
|
|
|
log_info(LD_DIR, "Rejected dir post request from %s "
|
|
|
|
"since we're not a public relay.", conn->base_.address);
|
2017-09-14 14:39:08 +02:00
|
|
|
write_short_http_response(conn, 503, "Not acting as a public relay");
|
2016-03-16 21:46:14 +01:00
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
2007-10-29 20:10:42 +01:00
|
|
|
if (parse_http_url(headers, &url) < 0) {
|
2017-09-14 14:39:08 +02:00
|
|
|
write_short_http_response(conn, 400, "Bad request");
|
2007-10-29 20:10:42 +01:00
|
|
|
return 0;
|
|
|
|
}
|
2014-09-09 16:22:01 +02:00
|
|
|
log_debug(LD_DIRSERV,"rewritten url as '%s'.", escaped(url));
|
2007-10-29 20:10:42 +01:00
|
|
|
|
|
|
|
/* Handle v2 rendezvous service publish request. */
|
2015-07-10 15:03:56 +02:00
|
|
|
if (connection_dir_is_encrypted(conn) &&
|
2007-10-29 20:10:42 +01:00
|
|
|
!strcmpstart(url,"/tor/rendezvous2/publish")) {
|
2016-03-08 01:29:05 +01:00
|
|
|
if (rend_cache_store_v2_desc_as_dir(body) < 0) {
|
2017-02-07 15:29:37 +01:00
|
|
|
log_warn(LD_REND, "Rejected v2 rend descriptor (body size %d) from %s.",
|
2016-03-21 22:08:02 +01:00
|
|
|
(int)body_len, conn->base_.address);
|
2017-09-14 14:39:08 +02:00
|
|
|
write_short_http_response(conn, 400,
|
2016-03-21 22:08:02 +01:00
|
|
|
"Invalid v2 service descriptor rejected");
|
2016-03-08 01:29:05 +01:00
|
|
|
} else {
|
2017-09-14 14:39:08 +02:00
|
|
|
write_short_http_response(conn, 200, "Service descriptor (v2) stored");
|
2016-03-21 22:08:02 +01:00
|
|
|
log_info(LD_REND, "Handled v2 rendezvous descriptor post: accepted");
|
2007-10-29 20:10:42 +01:00
|
|
|
}
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
2016-08-11 21:21:54 +02:00
|
|
|
/* Handle HS descriptor publish request. */
|
|
|
|
/* XXX: This should be disabled with a consensus param until we want
|
|
|
|
 * prop224 to be deployed and in use. */
|
|
|
|
if (connection_dir_is_encrypted(conn) && !strcmpstart(url, "/tor/hs/")) {
|
|
|
|
const char *msg = "HS descriptor stored successfully.";
|
2016-08-25 17:52:29 +02:00
|
|
|
|
2016-08-11 21:21:54 +02:00
|
|
|
/* We most probably have a publish request for an HS descriptor. */
|
|
|
|
int code = handle_post_hs_descriptor(url, body);
|
|
|
|
if (code != 200) {
|
|
|
|
msg = "Invalid HS descriptor. Rejected.";
|
|
|
|
}
|
2017-09-14 14:39:08 +02:00
|
|
|
write_short_http_response(conn, code, msg);
|
2016-08-11 21:21:54 +02:00
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
2006-10-02 00:16:55 +02:00
|
|
|
if (!authdir_mode(options)) {
|
2004-07-21 10:40:57 +02:00
|
|
|
/* we just provide cached directories; we don't want to
|
|
|
|
* receive anything. */
|
2017-09-14 14:39:08 +02:00
|
|
|
write_short_http_response(conn, 400, "Nonauthoritative directory does not "
|
2005-12-14 21:40:40 +01:00
|
|
|
"accept posted server descriptors");
|
2008-09-05 22:52:15 +02:00
|
|
|
goto done;
|
2004-07-21 10:40:57 +02:00
|
|
|
}
|
|
|
|
|
2017-06-02 18:26:53 +02:00
|
|
|
if (authdir_mode(options) &&
|
2007-05-02 11:12:04 +02:00
|
|
|
!strcmp(url,"/tor/")) { /* server descriptor post */
|
2008-12-17 22:50:01 +01:00
|
|
|
const char *msg = "[None]";
|
2007-06-09 09:05:19 +02:00
|
|
|
uint8_t purpose = authdir_mode_bridge(options) ?
|
2007-09-27 22:46:28 +02:00
|
|
|
ROUTER_PURPOSE_BRIDGE : ROUTER_PURPOSE_GENERAL;
|
2008-12-17 22:50:01 +01:00
|
|
|
was_router_added_t r = dirserv_add_multiple_descriptors(body, purpose,
|
2012-10-12 18:22:13 +02:00
|
|
|
conn->base_.address, &msg);
|
2005-08-31 08:14:37 +02:00
|
|
|
tor_assert(msg);
|
2008-12-17 22:50:01 +01:00
|
|
|
|
2017-05-11 02:03:07 +02:00
|
|
|
if (r == ROUTER_ADDED_SUCCESSFULLY) {
|
2017-09-14 14:39:08 +02:00
|
|
|
write_short_http_response(conn, 200, msg);
|
2008-12-17 22:50:01 +01:00
|
|
|
} else if (WRA_WAS_OUTDATED(r)) {
|
|
|
|
write_http_response_header_impl(conn, -1, NULL, NULL,
|
|
|
|
"X-Descriptor-Not-New: Yes\r\n", -1);
|
|
|
|
} else {
|
|
|
|
log_info(LD_DIRSERV,
|
|
|
|
"Rejected router descriptor or extra-info from %s "
|
|
|
|
"(\"%s\").",
|
2012-10-12 18:22:13 +02:00
|
|
|
conn->base_.address, msg);
|
2017-09-14 14:39:08 +02:00
|
|
|
write_short_http_response(conn, 400, msg);
|
2004-03-31 00:57:49 +02:00
|
|
|
}
|
2008-12-22 07:21:28 +01:00
|
|
|
goto done;
|
2004-03-31 00:57:49 +02:00
|
|
|
}
|
|
|
|
|
2007-07-26 00:56:44 +02:00
|
|
|
if (authdir_mode_v3(options) &&
|
2007-12-02 05:39:56 +01:00
|
|
|
!strcmp(url,"/tor/post/vote")) { /* v3 networkstatus vote */
|
2007-07-26 00:56:44 +02:00
|
|
|
const char *msg = "OK";
|
2007-08-13 23:01:02 +02:00
|
|
|
int status;
|
|
|
|
if (dirvote_add_vote(body, &msg, &status)) {
|
2017-09-14 14:39:08 +02:00
|
|
|
write_short_http_response(conn, status, "Vote stored");
|
2007-07-26 00:56:44 +02:00
|
|
|
} else {
|
|
|
|
tor_assert(msg);
|
2011-03-15 17:13:25 +01:00
|
|
|
log_warn(LD_DIRSERV, "Rejected vote from %s (\"%s\").",
|
2012-10-12 18:22:13 +02:00
|
|
|
conn->base_.address, msg);
|
2017-09-14 14:39:08 +02:00
|
|
|
write_short_http_response(conn, status, msg);
|
2007-07-26 00:56:44 +02:00
|
|
|
}
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
2007-07-29 04:55:21 +02:00
|
|
|
if (authdir_mode_v3(options) &&
|
|
|
|
!strcmp(url,"/tor/post/consensus-signature")) { /* sigs on consensus. */
|
2007-10-10 01:02:02 +02:00
|
|
|
const char *msg = NULL;
|
2012-10-12 18:22:13 +02:00
|
|
|
if (dirvote_add_signatures(body, conn->base_.address, &msg)>=0) {
|
2017-09-14 14:39:08 +02:00
|
|
|
write_short_http_response(conn, 200, msg?msg:"Signatures stored");
|
2007-07-29 04:55:21 +02:00
|
|
|
} else {
|
2007-10-10 01:02:02 +02:00
|
|
|
log_warn(LD_DIR, "Unable to store signatures posted by %s: %s",
|
2012-10-12 18:22:13 +02:00
|
|
|
conn->base_.address, msg?msg:"???");
|
2017-09-14 14:39:08 +02:00
|
|
|
write_short_http_response(conn, 400,
|
|
|
|
msg?msg:"Unable to store signatures");
|
2007-07-29 04:55:21 +02:00
|
|
|
}
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
2004-03-31 00:57:49 +02:00
|
|
|
/* we didn't recognize the url */
|
2017-09-14 14:39:08 +02:00
|
|
|
write_short_http_response(conn, 404, "Not found");
|
2005-08-24 00:27:17 +02:00
|
|
|
|
|
|
|
done:
|
2004-09-27 05:39:30 +02:00
|
|
|
tor_free(url);
|
2003-12-17 10:42:28 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** Called when a dirserver receives data on a directory connection;
|
2004-05-05 04:50:38 +02:00
|
|
|
* looks for an HTTP request. If the request is complete, remove it
|
|
|
|
* from the inbuf, try to process it; otherwise, leave it on the
|
|
|
|
* buffer. Return a 0 on success, or -1 on error.
|
|
|
|
*/
|
2016-12-14 01:12:34 +01:00
|
|
|
STATIC int
|
2006-07-26 21:07:26 +02:00
|
|
|
directory_handle_command(dir_connection_t *conn)
|
2005-06-11 20:52:12 +02:00
|
|
|
{
|
2003-12-17 10:42:28 +01:00
|
|
|
char *headers=NULL, *body=NULL;
|
2004-10-14 04:47:09 +02:00
|
|
|
size_t body_len=0;
|
2003-12-17 10:42:28 +01:00
|
|
|
int r;
|
|
|
|
|
2004-10-17 00:14:52 +02:00
|
|
|
tor_assert(conn);
|
2012-10-12 18:22:13 +02:00
|
|
|
tor_assert(conn->base_.type == CONN_TYPE_DIR);
|
2002-09-26 14:09:10 +02:00
|
|
|
|
2009-08-09 20:40:28 +02:00
|
|
|
switch (connection_fetch_from_buf_http(TO_CONN(conn),
|
2004-11-28 12:39:53 +01:00
|
|
|
&headers, MAX_HEADERS_SIZE,
|
2007-02-16 21:00:50 +01:00
|
|
|
&body, &body_len, MAX_DIR_UL_SIZE, 0)) {
|
2003-09-17 22:09:06 +02:00
|
|
|
case -1: /* overflow */
|
2006-02-13 10:37:53 +01:00
|
|
|
log_warn(LD_DIRSERV,
|
2009-11-22 05:09:24 +01:00
|
|
|
"Request too large from address '%s' to DirPort. Closing.",
|
2012-10-12 18:22:13 +02:00
|
|
|
safe_str(conn->base_.address));
|
2003-09-17 22:09:06 +02:00
|
|
|
return -1;
|
|
|
|
case 0:
|
2006-02-13 10:37:53 +01:00
|
|
|
log_debug(LD_DIRSERV,"command not all here yet.");
|
2003-09-17 22:09:06 +02:00
|
|
|
return 0;
|
|
|
|
/* case 1, fall through */
|
2002-09-26 14:09:10 +02:00
|
|
|
}
|
|
|
|
|
2006-07-26 21:07:26 +02:00
|
|
|
http_set_address_origin(headers, TO_CONN(conn));
|
2014-09-09 16:22:01 +02:00
|
|
|
// we should escape headers here as well,
|
|
|
|
// but we can't call escaped() twice, as it uses the same buffer
|
|
|
|
//log_debug(LD_DIRSERV,"headers %s, body %s.", headers, escaped(body));
|
2002-09-26 14:09:10 +02:00
|
|
|
|
2004-11-28 10:05:49 +01:00
|
|
|
if (!strncasecmp(headers,"GET",3))
|
2004-03-31 07:01:30 +02:00
|
|
|
r = directory_handle_command_get(conn, headers, body, body_len);
|
2003-12-17 10:42:28 +01:00
|
|
|
else if (!strncasecmp(headers,"POST",4))
|
2004-03-31 07:01:30 +02:00
|
|
|
r = directory_handle_command_post(conn, headers, body, body_len);
|
2003-12-17 10:42:28 +01:00
|
|
|
else {
|
2006-03-12 05:36:17 +01:00
|
|
|
log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
|
|
|
|
"Got headers %s with unknown command. Closing.",
|
|
|
|
escaped(headers));
|
2003-12-17 10:42:28 +01:00
|
|
|
r = -1;
|
2002-09-26 14:09:10 +02:00
|
|
|
}
|
|
|
|
|
2003-12-17 10:42:28 +01:00
|
|
|
tor_free(headers); tor_free(body);
|
|
|
|
return r;
|
2002-09-26 14:09:10 +02:00
|
|
|
}
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** Write handler for directory connections; called when all data has
|
2004-05-12 21:17:09 +02:00
|
|
|
* been flushed. Close the connection or wait for a response as
|
|
|
|
* appropriate.
|
2004-05-05 04:50:38 +02:00
|
|
|
*/
|
2005-06-11 20:52:12 +02:00
|
|
|
int
|
2006-07-26 21:07:26 +02:00
|
|
|
connection_dir_finished_flushing(dir_connection_t *conn)
|
2005-06-11 20:52:12 +02:00
|
|
|
{
|
2004-10-17 00:14:52 +02:00
|
|
|
tor_assert(conn);
|
2012-10-12 18:22:13 +02:00
|
|
|
tor_assert(conn->base_.type == CONN_TYPE_DIR);
|
2002-09-26 14:09:10 +02:00
|
|
|
|
2009-07-12 16:33:31 +02:00
|
|
|
/* Note that we have finished writing the directory response. For direct
|
2015-11-23 13:40:13 +01:00
|
|
|
* connections this means we're done; for tunneled connections it's only
|
2009-07-12 16:33:31 +02:00
|
|
|
* an intermediate step. */
|
2012-09-04 09:33:16 +02:00
|
|
|
if (conn->dirreq_id)
|
|
|
|
geoip_change_dirreq_state(conn->dirreq_id, DIRREQ_TUNNELED,
|
2009-07-14 22:24:50 +02:00
|
|
|
DIRREQ_FLUSHING_DIR_CONN_FINISHED);
|
2009-07-12 16:33:31 +02:00
|
|
|
else
|
|
|
|
geoip_change_dirreq_state(TO_CONN(conn)->global_identifier,
|
2009-07-14 22:24:50 +02:00
|
|
|
DIRREQ_DIRECT,
|
|
|
|
DIRREQ_FLUSHING_DIR_CONN_FINISHED);
|
2012-10-12 18:22:13 +02:00
|
|
|
switch (conn->base_.state) {
|
2009-08-11 21:16:16 +02:00
|
|
|
case DIR_CONN_STATE_CONNECTING:
|
2004-03-31 00:57:49 +02:00
|
|
|
case DIR_CONN_STATE_CLIENT_SENDING:
|
2006-02-13 10:37:53 +01:00
|
|
|
log_debug(LD_DIR,"client finished sending command.");
|
2012-10-12 18:22:13 +02:00
|
|
|
conn->base_.state = DIR_CONN_STATE_CLIENT_READING;
|
2002-09-26 14:09:10 +02:00
|
|
|
return 0;
|
2003-09-17 22:09:06 +02:00
|
|
|
case DIR_CONN_STATE_SERVER_WRITING:
|
2017-03-13 20:38:20 +01:00
|
|
|
if (conn->spool) {
|
2011-08-26 22:10:17 +02:00
|
|
|
log_warn(LD_BUG, "Emptied a dirserv buffer, but it's still spooling!");
|
|
|
|
connection_mark_for_close(TO_CONN(conn));
|
|
|
|
} else {
|
|
|
|
log_debug(LD_DIRSERV, "Finished writing server response. Closing.");
|
|
|
|
connection_mark_for_close(TO_CONN(conn));
|
|
|
|
}
|
2004-02-28 05:11:53 +01:00
|
|
|
return 0;
|
2002-09-26 14:09:10 +02:00
|
|
|
default:
|
2007-03-04 21:11:46 +01:00
|
|
|
log_warn(LD_BUG,"called in unexpected state %d.",
|
2012-10-12 18:22:13 +02:00
|
|
|
conn->base_.state);
|
2005-04-26 20:52:16 +02:00
|
|
|
tor_fragile_assert();
|
2003-09-26 12:03:50 +02:00
|
|
|
return -1;
|
2002-09-26 14:09:10 +02:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-04-13 08:54:31 +02:00
|
|
|
/* We just got a new consensus! If there are other in-progress requests
|
|
|
|
* for this consensus flavor (for example because we launched several in
|
|
|
|
* parallel), cancel them.
|
2016-03-26 01:08:39 +01:00
|
|
|
*
|
2016-04-13 08:54:31 +02:00
|
|
|
* We do this check here (not just in
|
|
|
|
* connection_ap_handshake_attach_circuit()) to handle the edge case where
|
|
|
|
* a consensus fetch begins and ends before some other one tries to attach to
|
|
|
|
* a circuit, in which case the other one won't know that we're all happy now.
|
2016-03-26 01:08:39 +01:00
|
|
|
*
|
2016-04-13 08:54:31 +02:00
|
|
|
* Don't mark the conn that just gave us the consensus -- otherwise we
|
|
|
|
* would end up double-marking it when it cleans itself up.
|
2016-03-26 01:08:39 +01:00
|
|
|
*/
|
2016-04-13 08:54:31 +02:00
|
|
|
static void
|
|
|
|
connection_dir_close_consensus_fetches(dir_connection_t *except_this_one,
|
|
|
|
const char *resource)
|
2015-12-07 08:07:44 +01:00
|
|
|
{
|
2016-04-13 08:54:31 +02:00
|
|
|
smartlist_t *conns_to_close =
|
|
|
|
connection_dir_list_by_purpose_and_resource(DIR_PURPOSE_FETCH_CONSENSUS,
|
|
|
|
resource);
|
|
|
|
SMARTLIST_FOREACH_BEGIN(conns_to_close, dir_connection_t *, d) {
|
|
|
|
if (d == except_this_one)
|
2015-12-07 08:07:44 +01:00
|
|
|
continue;
|
2016-04-13 08:54:31 +02:00
|
|
|
log_info(LD_DIR, "Closing consensus fetch (to %s) since one "
|
|
|
|
"has just arrived.", TO_CONN(d)->address);
|
|
|
|
connection_mark_for_close(TO_CONN(d));
|
2015-12-07 08:07:44 +01:00
|
|
|
} SMARTLIST_FOREACH_END(d);
|
2016-04-13 08:54:31 +02:00
|
|
|
smartlist_free(conns_to_close);
|
2015-12-07 08:07:44 +01:00
|
|
|
}
|
|
|
|
|
2004-05-12 21:17:09 +02:00
|
|
|
/** Connected handler for directory connections: begin sending data to the
|
2016-04-13 09:13:12 +02:00
|
|
|
* server, and return 0.
|
2015-12-07 08:07:44 +01:00
|
|
|
* Only used when connections don't immediately connect. */
|
2005-06-11 20:52:12 +02:00
|
|
|
int
|
2006-07-26 21:07:26 +02:00
|
|
|
connection_dir_finished_connecting(dir_connection_t *conn)
|
2004-05-12 21:17:09 +02:00
|
|
|
{
|
2004-10-17 00:14:52 +02:00
|
|
|
tor_assert(conn);
|
2012-10-12 18:22:13 +02:00
|
|
|
tor_assert(conn->base_.type == CONN_TYPE_DIR);
|
|
|
|
tor_assert(conn->base_.state == DIR_CONN_STATE_CONNECTING);
|
2004-05-12 21:17:09 +02:00
|
|
|
|
2006-02-13 10:37:53 +01:00
|
|
|
log_debug(LD_HTTP,"Dir connection to router %s:%u established.",
|
2012-10-12 18:22:13 +02:00
|
|
|
conn->base_.address,conn->base_.port);
|
2004-05-12 21:17:09 +02:00
|
|
|
|
2015-12-07 08:07:44 +01:00
|
|
|
/* start flushing conn */
|
|
|
|
conn->base_.state = DIR_CONN_STATE_CLIENT_SENDING;
|
2004-05-12 21:17:09 +02:00
|
|
|
return 0;
|
|
|
|
}
|
2005-06-09 21:03:31 +02:00
|
|
|
|
2013-05-24 09:48:15 +02:00
|
|
|
/** Decide which download schedule we want to use based on descriptor type
|
2015-12-07 07:55:38 +01:00
|
|
|
* in <b>dls</b> and <b>options</b>.
|
2018-04-18 17:26:40 +02:00
|
|
|
*
|
|
|
|
* Then, return the initial delay for that download schedule, in seconds.
|
|
|
|
*
|
2015-12-07 07:55:38 +01:00
|
|
|
* Helper function for download_status_increment_failure(),
|
|
|
|
* download_status_reset(), and download_status_increment_attempt(). */
|
2018-04-18 17:19:14 +02:00
|
|
|
STATIC int
|
2018-04-18 17:26:40 +02:00
|
|
|
find_dl_min_delay(const download_status_t *dls, const or_options_t *options)
|
2008-12-27 08:30:47 +01:00
|
|
|
{
|
2018-04-18 17:26:40 +02:00
|
|
|
tor_assert(dls);
|
|
|
|
tor_assert(options);
|
|
|
|
|
2008-12-27 08:30:47 +01:00
|
|
|
switch (dls->schedule) {
|
|
|
|
case DL_SCHED_GENERIC:
|
2017-09-11 06:21:48 +02:00
|
|
|
/* Any other directory document */
|
|
|
|
if (dir_server_mode(options)) {
|
|
|
|
/* A directory authority or directory mirror */
|
2018-04-18 16:53:39 +02:00
|
|
|
return options->TestingServerDownloadInitialDelay;
|
2015-12-07 07:55:38 +01:00
|
|
|
} else {
|
2018-04-18 16:53:39 +02:00
|
|
|
return options->TestingClientDownloadInitialDelay;
|
2015-12-07 07:55:38 +01:00
|
|
|
}
|
2008-12-27 08:30:47 +01:00
|
|
|
case DL_SCHED_CONSENSUS:
|
2017-09-11 06:21:48 +02:00
|
|
|
if (!networkstatus_consensus_can_use_multiple_directories(options)) {
|
|
|
|
/* A public relay */
|
2018-04-18 16:53:39 +02:00
|
|
|
return options->TestingServerConsensusDownloadInitialDelay;
|
2015-12-07 07:55:38 +01:00
|
|
|
} else {
|
2017-09-11 06:21:48 +02:00
|
|
|
/* A client or bridge */
|
|
|
|
if (networkstatus_consensus_is_bootstrapping(time(NULL))) {
|
|
|
|
/* During bootstrapping */
|
|
|
|
if (!networkstatus_consensus_can_use_extra_fallbacks(options)) {
|
2015-12-07 07:55:38 +01:00
|
|
|
/* A bootstrapping client without extra fallback directories */
|
2018-04-18 17:27:44 +02:00
|
|
|
return options->
|
|
|
|
ClientBootstrapConsensusAuthorityOnlyDownloadInitialDelay;
|
2015-12-07 07:55:38 +01:00
|
|
|
} else if (dls->want_authority) {
|
|
|
|
/* A bootstrapping client with extra fallback directories, but
|
|
|
|
* connecting to an authority */
|
|
|
|
return
|
2018-04-18 16:53:39 +02:00
|
|
|
options->ClientBootstrapConsensusAuthorityDownloadInitialDelay;
|
2015-12-07 07:55:38 +01:00
|
|
|
} else {
|
|
|
|
/* A bootstrapping client connecting to extra fallback directories
|
|
|
|
*/
|
|
|
|
return
|
2018-04-18 16:53:39 +02:00
|
|
|
options->ClientBootstrapConsensusFallbackDownloadInitialDelay;
|
2015-12-07 07:55:38 +01:00
|
|
|
}
|
|
|
|
} else {
|
2017-09-11 06:21:48 +02:00
|
|
|
/* A client with a reasonably live consensus, with or without
|
|
|
|
* certificates */
|
2018-04-18 16:53:39 +02:00
|
|
|
return options->TestingClientConsensusDownloadInitialDelay;
|
2015-12-07 07:55:38 +01:00
|
|
|
}
|
|
|
|
}
|
2008-12-27 08:30:47 +01:00
|
|
|
case DL_SCHED_BRIDGE:
|
2017-12-10 16:29:05 +01:00
|
|
|
if (options->UseBridges && num_bridges_usable(0) > 0) {
|
|
|
|
/* A bridge client that is sure that one or more of its bridges are
|
|
|
|
* running can afford to wait longer to update bridge descriptors. */
|
2018-04-18 16:53:39 +02:00
|
|
|
return options->TestingBridgeDownloadInitialDelay;
|
2017-09-11 05:56:35 +02:00
|
|
|
} else {
|
2017-12-10 16:29:05 +01:00
|
|
|
/* A bridge client which might have no running bridges, must try to
|
|
|
|
* get bridge descriptors straight away. */
|
2018-04-18 16:53:39 +02:00
|
|
|
return options->TestingBridgeBootstrapDownloadInitialDelay;
|
2017-09-11 05:56:35 +02:00
|
|
|
}
|
2008-12-27 08:30:47 +01:00
|
|
|
default:
|
|
|
|
tor_assert(0);
|
|
|
|
}
|
2015-05-30 08:03:50 +02:00
|
|
|
|
|
|
|
/* Impossible, but gcc will fail with -Werror without a `return`. */
|
2018-04-18 17:19:14 +02:00
|
|
|
return 0;
|
2008-12-27 08:30:47 +01:00
|
|
|
}
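/* Summary of the mapping implemented above (an illustrative sketch; the
 * option names are the ones referenced in the code):
 *
 *   GENERIC,   dir server           -> TestingServerDownloadInitialDelay
 *   GENERIC,   client               -> TestingClientDownloadInitialDelay
 *   CONSENSUS, public relay         -> TestingServerConsensusDownloadInitialDelay
 *   CONSENSUS, bootstrapping client -> one of the
 *                       ClientBootstrapConsensus*DownloadInitialDelay options
 *   CONSENSUS, client w/ live consensus
 *                                   -> TestingClientConsensusDownloadInitialDelay
 *   BRIDGE,    usable bridges       -> TestingBridgeDownloadInitialDelay
 *   BRIDGE,    no usable bridges    -> TestingBridgeBootstrapDownloadInitialDelay
 */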
|
2007-10-24 21:53:08 +02:00
|
|
|
|
2017-10-17 19:24:40 +02:00
|
|
|
/** As next_random_exponential_delay() below, but does not compute a random
|
|
|
|
* value. Instead, compute the range of values that
|
|
|
|
* next_random_exponential_delay() should use when computing its random value.
|
|
|
|
* Store the low bound into *<b>low_bound_out</b>, and the high bound into
|
|
|
|
* *<b>high_bound_out</b>. Guarantees that the low bound is strictly less
|
|
|
|
* than the high bound. */
|
|
|
|
STATIC void
|
|
|
|
next_random_exponential_delay_range(int *low_bound_out,
|
|
|
|
int *high_bound_out,
|
|
|
|
int delay,
|
|
|
|
int base_delay)
|
|
|
|
{
|
|
|
|
// This is the "decorrelated jitter" approach, from
|
|
|
|
// https://www.awsarchitectureblog.com/2015/03/backoff.html
|
|
|
|
// The formula is
|
|
|
|
// sleep = min(cap, random_between(base, sleep * 3))
|
|
|
|
|
|
|
|
const int delay_times_3 = delay < INT_MAX/3 ? delay * 3 : INT_MAX;
|
|
|
|
*low_bound_out = base_delay;
|
|
|
|
if (delay_times_3 > base_delay) {
|
|
|
|
*high_bound_out = delay_times_3;
|
|
|
|
} else {
|
|
|
|
*high_bound_out = base_delay+1;
|
|
|
|
}
|
|
|
|
}
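/* Worked example for the range computation above (the numbers are arbitrary
 * illustrations): with base_delay = 10 and a most recent delay of 30,
 * delay_times_3 = 90 > base_delay, so the range handed back is [10, 90).
 * With delay = 0, delay_times_3 = 0 is not greater than base_delay, so the
 * range degenerates to [10, 11) and the next delay is exactly base_delay. */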
|
|
|
|
|
|
|
|
/** Advance one delay step. The algorithm will generate a random delay,
|
|
|
|
 * such that each successive delay is possibly (at random) longer than the
 * one before.
|
|
|
|
*
|
2017-07-04 17:27:17 +02:00
|
|
|
* We then clamp that value to be no larger than max_delay, and return it.
|
2016-06-20 16:10:02 +02:00
|
|
|
*
|
2017-10-17 19:24:40 +02:00
|
|
|
* The <b>base_delay</b> parameter is lowest possible delay time (can't be
|
|
|
|
* zero); the <b>backoff_position</b> parameter is the number of times we've
|
|
|
|
* generated a delay; and the <b>delay</b> argument is the most recently used
|
|
|
|
* delay.
|
2016-06-18 20:23:55 +02:00
|
|
|
*/
|
|
|
|
STATIC int
|
2017-10-17 19:24:40 +02:00
|
|
|
next_random_exponential_delay(int delay,
|
2018-01-25 22:05:20 +01:00
|
|
|
int base_delay)
|
2016-06-18 20:23:55 +02:00
|
|
|
{
|
2016-06-20 16:10:02 +02:00
|
|
|
/* Check preconditions */
|
|
|
|
if (BUG(delay < 0))
|
|
|
|
delay = 0;
|
|
|
|
|
2017-10-17 19:24:40 +02:00
|
|
|
if (base_delay < 1)
|
|
|
|
base_delay = 1;
|
2016-06-20 16:10:02 +02:00
|
|
|
|
2018-01-25 22:05:20 +01:00
|
|
|
int low_bound=0, high_bound=INT_MAX;
|
2016-06-18 20:23:55 +02:00
|
|
|
|
2017-10-17 19:24:40 +02:00
|
|
|
next_random_exponential_delay_range(&low_bound, &high_bound,
|
|
|
|
delay, base_delay);
|
2016-06-18 20:23:55 +02:00
|
|
|
|
2018-01-25 22:05:20 +01:00
|
|
|
return crypto_rand_int_range(low_bound, high_bound);
|
2016-06-18 20:23:55 +02:00
|
|
|
}
|
|
|
|
|
2018-01-25 22:05:20 +01:00
|
|
|
/** Find the current delay for dls based on min_delay.
|
2018-01-25 21:51:13 +01:00
|
|
|
*
|
2016-06-18 19:11:32 +02:00
|
|
|
* This function sets dls->next_attempt_at based on now, and returns the delay.
|
2015-12-07 07:55:38 +01:00
|
|
|
* Helper for download_status_increment_failure and
|
|
|
|
* download_status_increment_attempt. */
|
|
|
|
STATIC int
|
|
|
|
download_status_schedule_get_delay(download_status_t *dls,
|
2018-01-25 22:05:20 +01:00
|
|
|
int min_delay,
|
2015-12-07 07:55:38 +01:00
|
|
|
time_t now)
|
|
|
|
{
|
|
|
|
tor_assert(dls);
|
2016-06-12 21:07:11 +02:00
|
|
|
/* If we're using random exponential backoff, we do need min/max delay */
|
2018-01-25 22:05:20 +01:00
|
|
|
tor_assert(min_delay >= 0);
|
2015-12-07 07:55:38 +01:00
|
|
|
|
|
|
|
int delay = INT_MAX;
|
|
|
|
uint8_t dls_schedule_position = (dls->increment_on
|
|
|
|
== DL_SCHED_INCREMENT_ATTEMPT
|
|
|
|
? dls->n_download_attempts
|
|
|
|
: dls->n_download_failures);
|
|
|
|
|
2018-01-25 21:52:33 +01:00
|
|
|
/* Check if we missed a reset somehow */
|
|
|
|
IF_BUG_ONCE(dls->last_backoff_position > dls_schedule_position) {
|
|
|
|
dls->last_backoff_position = 0;
|
|
|
|
dls->last_delay_used = 0;
|
|
|
|
}
|
2016-06-12 21:07:11 +02:00
|
|
|
|
2018-01-25 21:52:33 +01:00
|
|
|
if (dls_schedule_position > 0) {
|
|
|
|
delay = dls->last_delay_used;
|
2016-06-12 21:07:11 +02:00
|
|
|
|
2018-01-25 21:52:33 +01:00
|
|
|
while (dls->last_backoff_position < dls_schedule_position) {
|
|
|
|
/* Do one increment step */
|
2018-01-25 22:05:20 +01:00
|
|
|
delay = next_random_exponential_delay(delay, min_delay);
|
2018-01-25 21:52:33 +01:00
|
|
|
/* Update our position */
|
|
|
|
++(dls->last_backoff_position);
|
2016-06-12 21:07:11 +02:00
|
|
|
}
|
2018-01-25 21:52:33 +01:00
|
|
|
} else {
|
|
|
|
/* If we're just starting out, use the minimum delay */
|
|
|
|
delay = min_delay;
|
|
|
|
}
|
2016-06-12 21:07:11 +02:00
|
|
|
|
2018-01-25 21:52:33 +01:00
|
|
|
/* Clamp it within min/max if we have them */
|
|
|
|
if (min_delay >= 0 && delay < min_delay) delay = min_delay;
|
2016-06-12 21:07:11 +02:00
|
|
|
|
2018-01-25 21:52:33 +01:00
|
|
|
/* Store it for next time */
|
|
|
|
dls->last_backoff_position = dls_schedule_position;
|
|
|
|
dls->last_delay_used = delay;
|
2015-12-07 07:55:38 +01:00
|
|
|
|
|
|
|
/* A negative delay makes no sense. Knowing that delay is
|
|
|
|
* non-negative allows us to safely do the wrapping check below. */
|
|
|
|
tor_assert(delay >= 0);
|
|
|
|
|
2016-11-07 01:50:08 +01:00
|
|
|
/* Avoid now+delay overflowing TIME_MAX, by comparing with a subtraction
|
2015-12-07 07:55:38 +01:00
|
|
|
* that won't overflow (since delay is non-negative). */
|
2016-11-07 01:50:08 +01:00
|
|
|
if (delay < INT_MAX && now <= TIME_MAX - delay) {
|
2015-12-07 07:55:38 +01:00
|
|
|
dls->next_attempt_at = now+delay;
|
|
|
|
} else {
|
|
|
|
dls->next_attempt_at = TIME_MAX;
|
|
|
|
}
|
|
|
|
|
|
|
|
return delay;
|
|
|
|
}
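/* Illustrative progression for the scheduler above (a sketch with arbitrary
 * numbers): assume min_delay = 60 and a failure-incremented schedule that
 * was just reset (last_delay_used = 0, last_backoff_position = 0). The
 * first failure advances the position to 1 and yields exactly 60, so
 * next_attempt_at = now + 60. The second failure takes one more backoff
 * step and draws a delay uniformly from [60, 180), say 135. The third
 * draws from [60, 405) (that is, [min_delay, 3 * 135)), and so on;
 * last_backoff_position and last_delay_used record how far the schedule
 * has already advanced so that missed steps can be replayed. */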
|
|
|
|
|
|
|
|
/* Log a debug message about item, which increments on increment_action and has
|
|
|
|
 * been incremented dls_n_download_increments times. The message varies based on
|
|
|
|
* was_schedule_incremented (if not, not_incremented_response is logged), and
|
|
|
|
* the values of increment, dls_next_attempt_at, and now.
|
|
|
|
* Helper for download_status_increment_failure and
|
|
|
|
* download_status_increment_attempt. */
|
|
|
|
static void
|
|
|
|
download_status_log_helper(const char *item, int was_schedule_incremented,
|
|
|
|
const char *increment_action,
|
|
|
|
const char *not_incremented_response,
|
|
|
|
uint8_t dls_n_download_increments, int increment,
|
|
|
|
time_t dls_next_attempt_at, time_t now)
|
|
|
|
{
|
|
|
|
if (item) {
|
|
|
|
if (!was_schedule_incremented)
|
|
|
|
log_debug(LD_DIR, "%s %s %d time(s); I'll try again %s.",
|
|
|
|
item, increment_action, (int)dls_n_download_increments,
|
|
|
|
not_incremented_response);
|
|
|
|
else if (increment == 0)
|
|
|
|
log_debug(LD_DIR, "%s %s %d time(s); I'll try again immediately.",
|
|
|
|
item, increment_action, (int)dls_n_download_increments);
|
|
|
|
else if (dls_next_attempt_at < TIME_MAX)
|
|
|
|
log_debug(LD_DIR, "%s %s %d time(s); I'll try again in %d seconds.",
|
|
|
|
item, increment_action, (int)dls_n_download_increments,
|
|
|
|
(int)(dls_next_attempt_at-now));
|
|
|
|
else
|
|
|
|
log_debug(LD_DIR, "%s %s %d time(s); Giving up for a while.",
|
|
|
|
item, increment_action, (int)dls_n_download_increments);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Determine when a failed download attempt should be retried.
|
|
|
|
* Called when an attempt to download <b>dls</b> has failed with HTTP status
|
2007-10-18 16:19:51 +02:00
|
|
|
* <b>status_code</b>. Increment the failure count (if the code indicates a
|
2015-12-07 07:55:38 +01:00
|
|
|
* real failure, or if we're a server) and set <b>dls</b>-\>next_attempt_at to
|
|
|
|
* an appropriate time in the future and return it.
|
|
|
|
* If <b>dls->increment_on</b> is DL_SCHED_INCREMENT_ATTEMPT, increment the
|
|
|
|
* failure count, and return a time in the far future for the next attempt (to
|
|
|
|
* avoid an immediate retry). */
|
2007-10-09 17:27:45 +02:00
|
|
|
time_t
|
|
|
|
download_status_increment_failure(download_status_t *dls, int status_code,
|
|
|
|
const char *item, int server, time_t now)
|
|
|
|
{
|
2016-11-07 15:43:12 +01:00
|
|
|
(void) status_code; // XXXX no longer used.
|
|
|
|
(void) server; // XXXX no longer used.
|
2015-12-07 07:55:38 +01:00
|
|
|
int increment = -1;
|
2018-01-25 22:05:20 +01:00
|
|
|
int min_delay = 0;
|
2016-06-12 21:07:11 +02:00
|
|
|
|
2007-10-09 17:27:45 +02:00
|
|
|
tor_assert(dls);
|
2015-12-07 07:55:38 +01:00
|
|
|
|
2017-05-26 08:16:37 +02:00
|
|
|
/* dls wasn't reset before it was used */
|
|
|
|
if (dls->next_attempt_at == 0) {
|
|
|
|
download_status_reset(dls);
|
|
|
|
}
|
|
|
|
|
2016-11-07 15:43:12 +01:00
|
|
|
/* count the failure */
|
|
|
|
if (dls->n_download_failures < IMPOSSIBLE_TO_DOWNLOAD-1) {
|
|
|
|
++dls->n_download_failures;
|
2008-12-23 22:17:52 +01:00
|
|
|
}
|
2007-10-24 21:53:08 +02:00
|
|
|
|
2015-12-07 07:55:38 +01:00
|
|
|
if (dls->increment_on == DL_SCHED_INCREMENT_FAILURE) {
|
|
|
|
/* We don't find out that a failure-based schedule has attempted a
|
|
|
|
* connection until that connection fails.
|
|
|
|
* We'll never find out about successful connections, but this doesn't
|
|
|
|
* matter, because schedules are reset after a successful download.
|
|
|
|
*/
|
|
|
|
if (dls->n_download_attempts < IMPOSSIBLE_TO_DOWNLOAD-1)
|
|
|
|
++dls->n_download_attempts;
|
2007-10-24 21:53:08 +02:00
|
|
|
|
2015-12-07 07:55:38 +01:00
|
|
|
/* only return a failure retry time if this schedule increments on failures
|
|
|
|
*/
|
2018-01-25 22:05:20 +01:00
|
|
|
min_delay = find_dl_min_delay(dls, get_options());
|
|
|
|
increment = download_status_schedule_get_delay(dls, min_delay, now);
|
2015-12-07 07:55:38 +01:00
|
|
|
}
|
2007-10-24 21:53:08 +02:00
|
|
|
|
2015-12-07 07:55:38 +01:00
|
|
|
download_status_log_helper(item, !dls->increment_on, "failed",
|
|
|
|
"concurrently", dls->n_download_failures,
|
2017-07-04 17:29:35 +02:00
|
|
|
increment,
|
|
|
|
download_status_get_next_attempt_at(dls),
|
|
|
|
now);
|
2007-10-24 21:53:08 +02:00
|
|
|
|
2015-12-07 07:55:38 +01:00
|
|
|
if (dls->increment_on == DL_SCHED_INCREMENT_ATTEMPT) {
|
|
|
|
/* stop this schedule retrying on failure, it will launch concurrent
|
|
|
|
* connections instead */
|
|
|
|
return TIME_MAX;
|
|
|
|
} else {
|
2017-07-04 17:29:35 +02:00
|
|
|
return download_status_get_next_attempt_at(dls);
|
2015-12-07 07:55:38 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Determine when the next download attempt should be made when using an
|
|
|
|
* attempt-based (potentially concurrent) download schedule.
|
|
|
|
* Called when an attempt to download <b>dls</b> is being initiated.
|
|
|
|
* Increment the attempt count and set <b>dls</b>-\>next_attempt_at to an
|
|
|
|
* appropriate time in the future and return it.
|
|
|
|
* If <b>dls->increment_on</b> is DL_SCHED_INCREMENT_FAILURE, don't increment
|
|
|
|
* the attempts, and return a time in the far future (to avoid launching a
|
|
|
|
* concurrent attempt). */
|
|
|
|
time_t
|
|
|
|
download_status_increment_attempt(download_status_t *dls, const char *item,
|
|
|
|
time_t now)
|
|
|
|
{
|
|
|
|
int delay = -1;
|
2018-01-25 22:05:20 +01:00
|
|
|
int min_delay = 0;
|
2016-06-12 21:07:11 +02:00
|
|
|
|
2015-12-07 07:55:38 +01:00
|
|
|
tor_assert(dls);
|
|
|
|
|
2017-05-26 08:16:37 +02:00
|
|
|
/* dls wasn't reset before it was used */
|
|
|
|
if (dls->next_attempt_at == 0) {
|
|
|
|
download_status_reset(dls);
|
|
|
|
}
|
|
|
|
|
2015-12-07 07:55:38 +01:00
|
|
|
if (dls->increment_on == DL_SCHED_INCREMENT_FAILURE) {
|
|
|
|
/* this schedule should retry on failure, and not launch any concurrent
|
|
|
|
attempts */
|
2016-11-07 02:01:24 +01:00
|
|
|
log_warn(LD_BUG, "Tried to launch an attempt-based connection on a "
|
2015-12-07 07:55:38 +01:00
|
|
|
"failure-based schedule.");
|
|
|
|
return TIME_MAX;
|
2007-10-09 17:27:45 +02:00
|
|
|
}
|
2015-12-07 07:55:38 +01:00
|
|
|
|
|
|
|
if (dls->n_download_attempts < IMPOSSIBLE_TO_DOWNLOAD-1)
|
|
|
|
++dls->n_download_attempts;
|
|
|
|
|
2018-01-25 22:05:20 +01:00
|
|
|
min_delay = find_dl_min_delay(dls, get_options());
|
|
|
|
delay = download_status_schedule_get_delay(dls, min_delay, now);
|
2015-12-07 07:55:38 +01:00
|
|
|
|
|
|
|
download_status_log_helper(item, dls->increment_on, "attempted",
|
|
|
|
"on failure", dls->n_download_attempts,
|
2017-07-04 17:29:35 +02:00
|
|
|
delay, download_status_get_next_attempt_at(dls),
|
|
|
|
now);
|
2015-12-07 07:55:38 +01:00
|
|
|
|
2017-07-04 17:29:35 +02:00
|
|
|
return download_status_get_next_attempt_at(dls);
|
2007-10-09 17:27:45 +02:00
|
|
|
}
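
/* Illustrative counterpart for the attempt-based side, again hypothetical
 * and #if 0'd out: callers bump the attempt count when they *launch* a
 * (possibly concurrent) fetch, not when one fails. */
#if 0
static void
example_note_fetch_attempt(download_status_t *dls, const char *item)
{
  if (dls->increment_on == DL_SCHED_INCREMENT_ATTEMPT) {
    time_t next = download_status_increment_attempt(dls, item, time(NULL));
    log_info(LD_DIR, "Launching fetch of %s; next concurrent attempt "
             "allowed at %ld.", item, (long)next);
  }
}
#endif /* 0 */
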
|
|
|
|
|
2017-05-26 08:16:37 +02:00
|
|
|
static time_t
|
|
|
|
download_status_get_initial_delay_from_now(const download_status_t *dls)
|
|
|
|
{
|
|
|
|
/* We use constant initial delays, even in exponential backoff
|
|
|
|
* schedules. */
|
2018-04-18 17:19:14 +02:00
|
|
|
return time(NULL) + find_dl_min_delay(dls, get_options());
|
2007-10-09 17:27:45 +02:00
|
|
|
}
|
|
|
|
|
2007-10-18 16:19:51 +02:00
|
|
|
/** Reset <b>dls</b> so that it will be considered downloadable
|
2008-12-27 08:30:47 +01:00
|
|
|
* immediately, and/or to show that we don't need it anymore.
|
|
|
|
*
|
2015-12-07 07:55:38 +01:00
|
|
|
 * Must be called to initialise a download schedule; otherwise the zeroth item
|
|
|
|
* in the schedule will never be used.
|
|
|
|
*
|
2008-12-27 08:30:47 +01:00
|
|
|
* (We find the zeroth element of the download schedule, and set
|
|
|
|
* next_attempt_at to be the appropriate offset from 'now'. In most
|
|
|
|
* cases this means setting it to 'now', so the item will be immediately
|
2017-08-29 06:04:24 +02:00
|
|
|
* downloadable; when using authorities with fallbacks, there is a few seconds'
|
|
|
|
* delay.) */
|
2007-10-09 17:27:45 +02:00
|
|
|
void
|
|
|
|
download_status_reset(download_status_t *dls)
|
|
|
|
{
|
2015-12-07 07:55:38 +01:00
|
|
|
if (dls->n_download_failures == IMPOSSIBLE_TO_DOWNLOAD
|
|
|
|
|| dls->n_download_attempts == IMPOSSIBLE_TO_DOWNLOAD)
|
2014-10-07 15:34:28 +02:00
|
|
|
return; /* Don't reset this. */
|
|
|
|
|
2007-10-09 17:27:45 +02:00
|
|
|
dls->n_download_failures = 0;
|
2015-12-07 07:55:38 +01:00
|
|
|
dls->n_download_attempts = 0;
|
2017-05-26 08:16:37 +02:00
|
|
|
dls->next_attempt_at = download_status_get_initial_delay_from_now(dls);
|
2016-06-12 21:07:11 +02:00
|
|
|
dls->last_backoff_position = 0;
|
|
|
|
dls->last_delay_used = 0;
|
2015-12-07 07:55:38 +01:00
|
|
|
/* Don't reset dls->want_authority or dls->increment_on */
|
2007-10-09 17:27:45 +02:00
|
|
|
}
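
/* Illustrative sketch, hypothetical and #if 0'd out: how a schedule is meant
 * to be initialised. A zeroed download_status_t must be reset before use, or
 * the zeroth delay in the schedule would never be applied. */
#if 0
static void
example_init_schedule(download_status_t *dls)
{
  memset(dls, 0, sizeof(*dls));
  dls->increment_on = DL_SCHED_INCREMENT_FAILURE; /* retry on failure */
  download_status_reset(dls); /* schedules the first attempt */
}
#endif /* 0 */
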
|
|
|
|
|
2010-09-02 22:42:18 +02:00
|
|
|
/** Return the number of failures on <b>dls</b> since the last success (if
|
|
|
|
* any). */
|
|
|
|
int
|
|
|
|
download_status_get_n_failures(const download_status_t *dls)
|
|
|
|
{
|
|
|
|
return dls->n_download_failures;
|
|
|
|
}
|
|
|
|
|
2015-12-07 07:55:38 +01:00
|
|
|
/** Return the number of attempts to download <b>dls</b> since the last success
|
|
|
|
* (if any). This can differ from download_status_get_n_failures() due to
|
|
|
|
* outstanding concurrent attempts. */
|
|
|
|
int
|
|
|
|
download_status_get_n_attempts(const download_status_t *dls)
|
|
|
|
{
|
|
|
|
return dls->n_download_attempts;
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Return the next time to attempt to download <b>dls</b>. */
|
|
|
|
time_t
|
|
|
|
download_status_get_next_attempt_at(const download_status_t *dls)
|
|
|
|
{
|
2017-05-26 08:16:37 +02:00
|
|
|
/* dls wasn't reset before it was used */
|
|
|
|
if (dls->next_attempt_at == 0) {
|
|
|
|
/* so give the answer we would have given if it had been */
|
|
|
|
return download_status_get_initial_delay_from_now(dls);
|
|
|
|
}
|
|
|
|
|
2015-12-07 07:55:38 +01:00
|
|
|
return dls->next_attempt_at;
|
|
|
|
}
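
/* Illustrative readiness check, hypothetical and #if 0'd out (the helper Tor
 * actually uses elsewhere may differ): a download is launchable once its
 * scheduled time has passed and it hasn't been marked impossible. */
#if 0
static int
example_is_downloadable(const download_status_t *dls, time_t now)
{
  return download_status_get_n_failures(dls) < IMPOSSIBLE_TO_DOWNLOAD &&
         download_status_get_next_attempt_at(dls) <= now;
}
#endif /* 0 */
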
|
|
|
|
|
2007-05-29 19:31:13 +02:00
|
|
|
/** Called when one or more routerdesc (or extrainfo, if <b>was_extrainfo</b>)
|
2007-09-21 08:14:36 +02:00
|
|
|
* fetches have failed (with uppercase fingerprints listed in <b>failed</b>,
|
|
|
|
* either as descriptor digests or as identity digests based on
|
|
|
|
* <b>was_descriptor_digests</b>).
|
|
|
|
*/
|
2007-11-04 01:15:42 +01:00
|
|
|
static void
|
2007-05-18 23:19:19 +02:00
|
|
|
dir_routerdesc_download_failed(smartlist_t *failed, int status_code,
|
2007-11-04 01:15:42 +01:00
|
|
|
int router_purpose,
|
2007-09-21 08:14:36 +02:00
|
|
|
int was_extrainfo, int was_descriptor_digests)
|
2005-09-15 07:19:38 +02:00
|
|
|
{
|
2005-09-18 06:15:39 +02:00
|
|
|
char digest[DIGEST_LEN];
|
2005-09-22 08:34:29 +02:00
|
|
|
time_t now = time(NULL);
|
2007-12-01 05:58:53 +01:00
|
|
|
int server = directory_fetches_from_authorities(get_options());
|
2007-11-04 01:15:42 +01:00
|
|
|
if (!was_descriptor_digests) {
|
|
|
|
if (router_purpose == ROUTER_PURPOSE_BRIDGE) {
|
2010-08-31 21:45:44 +02:00
|
|
|
tor_assert(!was_extrainfo);
|
|
|
|
connection_dir_retry_bridges(failed);
|
2007-11-04 01:15:42 +01:00
|
|
|
}
|
|
|
|
return; /* FFFF should implement for other-than-router-purpose someday */
|
|
|
|
}
|
2012-07-17 15:33:38 +02:00
|
|
|
SMARTLIST_FOREACH_BEGIN(failed, const char *, cp) {
|
2007-05-18 23:19:53 +02:00
|
|
|
download_status_t *dls = NULL;
|
2016-06-17 16:41:45 +02:00
|
|
|
if (base16_decode(digest, DIGEST_LEN, cp, strlen(cp)) != DIGEST_LEN) {
|
2008-01-14 20:00:23 +01:00
|
|
|
log_warn(LD_BUG, "Malformed fingerprint in list: %s", escaped(cp));
|
|
|
|
continue;
|
|
|
|
}
|
2007-05-18 23:19:53 +02:00
|
|
|
if (was_extrainfo) {
|
|
|
|
signed_descriptor_t *sd =
|
|
|
|
router_get_by_extrainfo_digest(digest);
|
|
|
|
if (sd)
|
|
|
|
dls = &sd->ei_dl_status;
|
|
|
|
} else {
|
2007-10-16 01:15:24 +02:00
|
|
|
dls = router_get_dl_status_by_descriptor_digest(digest);
|
2007-05-18 23:19:53 +02:00
|
|
|
}
|
2018-01-31 21:08:46 +01:00
|
|
|
if (!dls)
|
2005-09-18 04:51:12 +02:00
|
|
|
continue;
|
2007-10-09 17:27:45 +02:00
|
|
|
download_status_increment_failure(dls, status_code, cp, server, now);
|
2012-07-17 15:33:38 +02:00
|
|
|
} SMARTLIST_FOREACH_END(cp);
|
2005-09-22 08:34:29 +02:00
|
|
|
|
2007-02-24 22:21:38 +01:00
|
|
|
/* No need to relaunch descriptor downloads here: we already do it
|
2007-11-04 01:15:42 +01:00
|
|
|
* every 10 or 60 seconds (FOO_DESCRIPTOR_RETRY_INTERVAL) in main.c. */
|
2005-09-15 07:19:38 +02:00
|
|
|
}
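
/* Illustrative sketch, hypothetical and #if 0'd out, of the <b>failed</b>
 * list this function expects: uppercase hex digests, one per element, owned
 * by the caller. The purpose value and example digest are placeholders. */
#if 0
static void
example_report_failed_descs(void)
{
  smartlist_t *failed = smartlist_new();
  smartlist_add(failed,
                tor_strdup("0123456789ABCDEF0123456789ABCDEF01234567"));
  dir_routerdesc_download_failed(failed, 404, ROUTER_PURPOSE_GENERAL,
                                 0 /*was_extrainfo*/,
                                 1 /*was_descriptor_digests*/);
  SMARTLIST_FOREACH(failed, char *, cp, tor_free(cp));
  smartlist_free(failed);
}
#endif /* 0 */
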
|
|
|
|
|
2017-11-06 13:48:22 +01:00
|
|
|
/** Called when a connection to download microdescriptors from the relay with
|
|
|
|
* <b>dir_id</b> has failed in whole or in part. <b>failed</b> is a list
|
|
|
|
* of every microdesc digest we didn't get. <b>status_code</b> is the http
|
|
|
|
* status code we received. Reschedule the microdesc downloads as
|
|
|
|
* appropriate. */
|
2010-05-11 23:20:33 +02:00
|
|
|
static void
|
|
|
|
dir_microdesc_download_failed(smartlist_t *failed,
|
2017-11-06 13:48:22 +01:00
|
|
|
int status_code, const char *dir_id)
|
2010-05-11 23:20:33 +02:00
|
|
|
{
|
|
|
|
networkstatus_t *consensus
|
|
|
|
= networkstatus_get_latest_consensus_by_flavor(FLAV_MICRODESC);
|
|
|
|
routerstatus_t *rs;
|
|
|
|
download_status_t *dls;
|
|
|
|
time_t now = time(NULL);
|
|
|
|
int server = directory_fetches_from_authorities(get_options());
|
|
|
|
|
|
|
|
if (! consensus)
|
|
|
|
return;
|
2017-11-06 13:48:22 +01:00
|
|
|
|
|
|
|
  /* We failed to fetch a microdescriptor from 'dir_id'; note it down
|
|
|
|
* so that we don't try the same relay next time... */
|
|
|
|
microdesc_note_outdated_dirserver(dir_id);
|
|
|
|
|
2010-05-11 23:20:33 +02:00
|
|
|
SMARTLIST_FOREACH_BEGIN(failed, const char *, d) {
|
2010-09-29 21:00:41 +02:00
|
|
|
rs = router_get_mutable_consensus_status_by_descriptor_digest(consensus,d);
|
2010-05-11 23:20:33 +02:00
|
|
|
if (!rs)
|
|
|
|
continue;
|
|
|
|
dls = &rs->dl_status;
|
2017-11-06 13:48:22 +01:00
|
|
|
|
|
|
|
{ /* Increment the failure count for this md fetch */
|
2010-05-11 23:20:33 +02:00
|
|
|
char buf[BASE64_DIGEST256_LEN+1];
|
|
|
|
digest256_to_base64(buf, d);
|
2017-11-06 13:48:22 +01:00
|
|
|
log_info(LD_DIR, "Failed to download md %s from %s",
|
|
|
|
buf, hex_str(dir_id, DIGEST_LEN));
|
2010-05-11 23:20:33 +02:00
|
|
|
download_status_increment_failure(dls, status_code, buf,
|
|
|
|
server, now);
|
|
|
|
}
|
|
|
|
} SMARTLIST_FOREACH_END(d);
|
|
|
|
}
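
/* Illustrative sketch, hypothetical and #if 0'd out: the elements of
 * <b>failed</b> above are raw 32-byte (DIGEST256_LEN) microdescriptor
 * digests, and <b>dir_id</b> is a binary identity digest; base64 and hex
 * encoding are only done for logging and for the failure counter's label. */
#if 0
static void
example_log_failed_md(const char *md_sha256, const char *dir_id)
{
  char buf[BASE64_DIGEST256_LEN+1];
  digest256_to_base64(buf, md_sha256);
  log_info(LD_DIR, "md %s failed via dirserver %s",
           buf, hex_str(dir_id, DIGEST_LEN));
}
#endif /* 0 */
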
|
|
|
|
|
2011-05-10 22:58:38 +02:00
|
|
|
/** Helper. Compare two fp_pair_t objects, and return negative, 0, or
|
|
|
|
* positive as appropriate. */
|
2008-12-12 20:05:36 +01:00
|
|
|
static int
|
2012-10-12 18:22:13 +02:00
|
|
|
compare_pairs_(const void **a, const void **b)
|
2008-12-12 20:05:36 +01:00
|
|
|
{
|
|
|
|
const fp_pair_t *fp1 = *a, *fp2 = *b;
|
|
|
|
int r;
|
2011-05-10 22:58:38 +02:00
|
|
|
if ((r = fast_memcmp(fp1->first, fp2->first, DIGEST_LEN)))
|
2008-12-12 20:05:36 +01:00
|
|
|
return r;
|
|
|
|
else
|
2011-05-10 22:58:38 +02:00
|
|
|
return fast_memcmp(fp1->second, fp2->second, DIGEST_LEN);
|
2008-12-12 20:05:36 +01:00
|
|
|
}
|
|
|
|
|
2008-12-17 23:58:20 +01:00
|
|
|
/** Divide a string <b>res</b> of the form FP1-FP2+FP3-FP4...[.z], where each
|
|
|
|
* FP is a hex-encoded fingerprint, into a sequence of distinct sorted
|
|
|
|
* fp_pair_t. Skip malformed pairs. On success, return 0 and add those
|
|
|
|
* fp_pair_t into <b>pairs_out</b>. On failure, return -1. */
|
2008-12-12 20:05:36 +01:00
|
|
|
int
|
|
|
|
dir_split_resource_into_fingerprint_pairs(const char *res,
|
|
|
|
smartlist_t *pairs_out)
|
|
|
|
{
|
2012-01-18 21:53:30 +01:00
|
|
|
smartlist_t *pairs_tmp = smartlist_new();
|
|
|
|
smartlist_t *pairs_result = smartlist_new();
|
2008-12-12 20:05:36 +01:00
|
|
|
|
|
|
|
smartlist_split_string(pairs_tmp, res, "+", 0, 0);
|
|
|
|
if (smartlist_len(pairs_tmp)) {
|
|
|
|
char *last = smartlist_get(pairs_tmp,smartlist_len(pairs_tmp)-1);
|
|
|
|
size_t last_len = strlen(last);
|
|
|
|
if (last_len > 2 && !strcmp(last+last_len-2, ".z")) {
|
|
|
|
last[last_len-2] = '\0';
|
|
|
|
}
|
|
|
|
}
|
|
|
|
SMARTLIST_FOREACH_BEGIN(pairs_tmp, char *, cp) {
|
|
|
|
if (strlen(cp) != HEX_DIGEST_LEN*2+1) {
|
|
|
|
log_info(LD_DIR,
|
|
|
|
"Skipping digest pair %s with non-standard length.", escaped(cp));
|
|
|
|
} else if (cp[HEX_DIGEST_LEN] != '-') {
|
|
|
|
log_info(LD_DIR,
|
|
|
|
"Skipping digest pair %s with missing dash.", escaped(cp));
|
|
|
|
} else {
|
|
|
|
fp_pair_t pair;
|
2016-06-17 16:41:45 +02:00
|
|
|
if (base16_decode(pair.first, DIGEST_LEN,
|
|
|
|
cp, HEX_DIGEST_LEN) != DIGEST_LEN ||
|
|
|
|
base16_decode(pair.second,DIGEST_LEN,
|
|
|
|
cp+HEX_DIGEST_LEN+1, HEX_DIGEST_LEN) != DIGEST_LEN) {
|
2008-12-12 20:05:36 +01:00
|
|
|
log_info(LD_DIR, "Skipping non-decodable digest pair %s", escaped(cp));
|
|
|
|
} else {
|
|
|
|
smartlist_add(pairs_result, tor_memdup(&pair, sizeof(pair)));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
tor_free(cp);
|
|
|
|
} SMARTLIST_FOREACH_END(cp);
|
|
|
|
smartlist_free(pairs_tmp);
|
|
|
|
|
|
|
|
/* Uniq-and-sort */
|
2012-10-12 18:22:13 +02:00
|
|
|
smartlist_sort(pairs_result, compare_pairs_);
|
|
|
|
smartlist_uniq(pairs_result, compare_pairs_, tor_free_);
|
2008-12-12 20:05:36 +01:00
|
|
|
|
|
|
|
smartlist_add_all(pairs_out, pairs_result);
|
|
|
|
smartlist_free(pairs_result);
|
|
|
|
return 0;
|
|
|
|
}
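
/* Illustrative sketch, hypothetical and #if 0'd out, of the request format
 * this parser accepts: "+"-joined "FP1-FP2" elements of 40 hex characters
 * each, optionally ending in ".z" for a compressed reply. The digest values
 * are placeholders. */
#if 0
static void
example_parse_fp_pairs(void)
{
  smartlist_t *pairs = smartlist_new();
  const char *res =
    "1111111111111111111111111111111111111111-"
    "2222222222222222222222222222222222222222.z";
  if (dir_split_resource_into_fingerprint_pairs(res, pairs) == 0)
    log_info(LD_DIR, "Parsed %d fingerprint pair(s).", smartlist_len(pairs));
  SMARTLIST_FOREACH(pairs, fp_pair_t *, pair, tor_free(pair));
  smartlist_free(pairs);
}
#endif /* 0 */
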
|
|
|
|
|
2007-02-02 21:06:43 +01:00
|
|
|
/** Given a directory <b>resource</b> request, containing zero
|
2005-09-30 22:04:55 +02:00
|
|
|
* or more strings separated by plus signs, followed optionally by ".z", store
|
|
|
|
* the strings, in order, into <b>fp_out</b>. If <b>compressed_out</b> is
|
2009-10-18 21:45:57 +02:00
|
|
|
* non-NULL, set it to 1 if the resource ends in ".z", else set it to 0.
|
|
|
|
*
|
|
|
|
* If (flags & DSR_HEX), then delete all elements that aren't hex digests, and
|
|
|
|
* decode the rest. If (flags & DSR_BASE64), then use "-" rather than "+" as
|
|
|
|
* a separator, delete all the elements that aren't base64-encoded digests,
|
|
|
|
* and decode the rest. If (flags & DSR_DIGEST256), these digests should be
|
|
|
|
* 256 bits long; else they should be 160.
|
|
|
|
*
|
|
|
|
* If (flags & DSR_SORT_UNIQ), then sort the list and remove all duplicates.
|
2005-09-30 22:04:55 +02:00
|
|
|
*/
|
2005-09-16 06:42:45 +02:00
|
|
|
int
|
|
|
|
dir_split_resource_into_fingerprints(const char *resource,
|
2005-10-14 06:56:20 +02:00
|
|
|
smartlist_t *fp_out, int *compressed_out,
|
2009-10-18 21:45:57 +02:00
|
|
|
int flags)
|
2005-09-16 06:42:45 +02:00
|
|
|
{
|
2009-10-18 21:45:57 +02:00
|
|
|
const int decode_hex = flags & DSR_HEX;
|
|
|
|
const int decode_base64 = flags & DSR_BASE64;
|
|
|
|
const int digests_are_256 = flags & DSR_DIGEST256;
|
|
|
|
const int sort_uniq = flags & DSR_SORT_UNIQ;
|
|
|
|
|
|
|
|
const int digest_len = digests_are_256 ? DIGEST256_LEN : DIGEST_LEN;
|
|
|
|
const int hex_digest_len = digests_are_256 ?
|
|
|
|
HEX_DIGEST256_LEN : HEX_DIGEST_LEN;
|
|
|
|
const int base64_digest_len = digests_are_256 ?
|
|
|
|
BASE64_DIGEST256_LEN : BASE64_DIGEST_LEN;
|
2012-01-18 21:53:30 +01:00
|
|
|
smartlist_t *fp_tmp = smartlist_new();
|
2009-10-18 21:45:57 +02:00
|
|
|
|
|
|
|
tor_assert(!(decode_hex && decode_base64));
|
2005-10-28 01:16:08 +02:00
|
|
|
tor_assert(fp_out);
|
2009-10-18 21:45:57 +02:00
|
|
|
|
|
|
|
smartlist_split_string(fp_tmp, resource, decode_base64?"-":"+", 0, 0);
|
2005-09-16 06:42:45 +02:00
|
|
|
if (compressed_out)
|
|
|
|
*compressed_out = 0;
|
2006-06-21 06:57:12 +02:00
|
|
|
if (smartlist_len(fp_tmp)) {
|
|
|
|
char *last = smartlist_get(fp_tmp,smartlist_len(fp_tmp)-1);
|
2005-09-16 06:42:45 +02:00
|
|
|
size_t last_len = strlen(last);
|
|
|
|
if (last_len > 2 && !strcmp(last+last_len-2, ".z")) {
|
|
|
|
last[last_len-2] = '\0';
|
|
|
|
if (compressed_out)
|
|
|
|
*compressed_out = 1;
|
|
|
|
}
|
|
|
|
}
|
2009-10-18 21:45:57 +02:00
|
|
|
if (decode_hex || decode_base64) {
|
|
|
|
const size_t encoded_len = decode_hex ? hex_digest_len : base64_digest_len;
|
2005-10-14 06:56:20 +02:00
|
|
|
int i;
|
2005-10-25 09:03:22 +02:00
|
|
|
char *cp, *d = NULL;
|
2006-06-21 06:57:12 +02:00
|
|
|
for (i = 0; i < smartlist_len(fp_tmp); ++i) {
|
|
|
|
cp = smartlist_get(fp_tmp, i);
|
2009-10-18 21:45:57 +02:00
|
|
|
if (strlen(cp) != encoded_len) {
|
2006-02-13 10:37:53 +01:00
|
|
|
log_info(LD_DIR,
|
2006-03-05 10:50:26 +01:00
|
|
|
"Skipping digest %s with non-standard length.", escaped(cp));
|
2006-06-21 06:57:12 +02:00
|
|
|
smartlist_del_keeporder(fp_tmp, i--);
|
2005-10-25 09:03:22 +02:00
|
|
|
goto again;
|
2005-10-14 06:56:20 +02:00
|
|
|
}
|
2009-10-18 21:45:57 +02:00
|
|
|
d = tor_malloc_zero(digest_len);
|
|
|
|
if (decode_hex ?
|
2016-06-17 16:41:45 +02:00
|
|
|
(base16_decode(d, digest_len, cp, hex_digest_len) != digest_len) :
|
|
|
|
(base64_decode(d, digest_len, cp, base64_digest_len)
|
|
|
|
!= digest_len)) {
|
2009-10-18 21:45:57 +02:00
|
|
|
log_info(LD_DIR, "Skipping non-decodable digest %s", escaped(cp));
|
|
|
|
smartlist_del_keeporder(fp_tmp, i--);
|
|
|
|
goto again;
|
2005-10-14 06:56:20 +02:00
|
|
|
}
|
2006-06-21 06:57:12 +02:00
|
|
|
smartlist_set(fp_tmp, i, d);
|
2005-10-25 09:03:22 +02:00
|
|
|
d = NULL;
|
|
|
|
again:
|
|
|
|
tor_free(cp);
|
|
|
|
tor_free(d);
|
2005-10-14 06:56:20 +02:00
|
|
|
}
|
|
|
|
}
|
2006-06-21 06:57:12 +02:00
|
|
|
if (sort_uniq) {
|
2009-10-18 21:45:57 +02:00
|
|
|
if (decode_hex || decode_base64) {
|
|
|
|
if (digests_are_256) {
|
|
|
|
smartlist_sort_digests256(fp_tmp);
|
|
|
|
smartlist_uniq_digests256(fp_tmp);
|
|
|
|
} else {
|
|
|
|
smartlist_sort_digests(fp_tmp);
|
|
|
|
smartlist_uniq_digests(fp_tmp);
|
|
|
|
}
|
|
|
|
} else {
|
2006-06-21 06:57:12 +02:00
|
|
|
smartlist_sort_strings(fp_tmp);
|
2009-10-18 21:45:57 +02:00
|
|
|
smartlist_uniq_strings(fp_tmp);
|
2006-06-21 06:57:12 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
smartlist_add_all(fp_out, fp_tmp);
|
|
|
|
smartlist_free(fp_tmp);
|
2005-09-16 06:52:53 +02:00
|
|
|
return 0;
|
2005-09-16 06:42:45 +02:00
|
|
|
}
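
/* Illustrative sketch, hypothetical and #if 0'd out: splitting a resource of
 * "+"-joined hex digests into binary DIGEST_LEN buffers while detecting a
 * trailing ".z". The digest values are placeholders. */
#if 0
static void
example_split_hex_resource(void)
{
  smartlist_t *digests = smartlist_new();
  int compressed = 0;
  dir_split_resource_into_fingerprints(
      "1111111111111111111111111111111111111111+"
      "2222222222222222222222222222222222222222.z",
      digests, &compressed, DSR_HEX|DSR_SORT_UNIQ);
  log_info(LD_DIR, "Got %d binary digest(s); compressed=%d",
           smartlist_len(digests), compressed);
  SMARTLIST_FOREACH(digests, char *, d, tor_free(d));
  smartlist_free(digests);
}
#endif /* 0 */
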
|
2016-02-10 03:20:59 +01:00
|
|
|
|
2017-03-13 20:38:20 +01:00
|
|
|
/** As dir_split_resource_into_fingerprints, but instead fills
|
|
|
|
 * <b>spool_out</b> with a list of spooled_resource_t for the resource
|
|
|
|
* identified through <b>source</b>. */
|
|
|
|
int
|
|
|
|
dir_split_resource_into_spoolable(const char *resource,
|
|
|
|
dir_spool_source_t source,
|
|
|
|
smartlist_t *spool_out,
|
|
|
|
int *compressed_out,
|
|
|
|
int flags)
|
|
|
|
{
|
|
|
|
smartlist_t *fingerprints = smartlist_new();
|
|
|
|
|
|
|
|
tor_assert(flags & (DSR_HEX|DSR_BASE64));
|
|
|
|
const size_t digest_len =
|
|
|
|
(flags & DSR_DIGEST256) ? DIGEST256_LEN : DIGEST_LEN;
|
|
|
|
|
|
|
|
int r = dir_split_resource_into_fingerprints(resource, fingerprints,
|
|
|
|
compressed_out, flags);
|
|
|
|
/* This is not a very efficient implementation XXXX */
|
|
|
|
SMARTLIST_FOREACH_BEGIN(fingerprints, uint8_t *, digest) {
|
|
|
|
spooled_resource_t *spooled =
|
|
|
|
spooled_resource_new(source, digest, digest_len);
|
|
|
|
if (spooled)
|
|
|
|
smartlist_add(spool_out, spooled);
|
|
|
|
tor_free(digest);
|
|
|
|
} SMARTLIST_FOREACH_END(digest);
|
|
|
|
|
|
|
|
smartlist_free(fingerprints);
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|