2016-05-18 02:04:16 +02:00
|
|
|
/* Copyright (c) 2001 Matej Pfajfar.
|
2006-02-09 06:46:49 +01:00
|
|
|
* Copyright (c) 2001-2004, Roger Dingledine.
|
2007-12-12 22:09:01 +01:00
|
|
|
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
|
2018-06-20 14:13:28 +02:00
|
|
|
* Copyright (c) 2007-2018, The Tor Project, Inc. */
|
2004-05-10 19:30:51 +02:00
|
|
|
/* See LICENSE for licensing information */
|
|
|
|
|
|
|
|
/**
|
|
|
|
* \file routerparse.c
|
2016-10-15 02:08:51 +02:00
|
|
|
 * \brief Code to parse and validate router descriptors, consensus directories,
|
|
|
|
* and similar objects.
|
|
|
|
*
|
|
|
|
* The objects parsed by this module use a common text-based metaformat,
|
|
|
|
* documented in dir-spec.txt in torspec.git. This module is itself divided
|
|
|
|
* into two major kinds of function: code to handle the metaformat, and code
|
|
|
|
* to convert from particular instances of the metaformat into the
|
|
|
|
* objects that Tor uses.
|
|
|
|
*
|
|
|
|
* The generic parsing code works by calling a table-based tokenizer on the
|
|
|
|
* input string. Each token corresponds to a single line with a token, plus
|
|
|
|
* optional arguments on that line, plus an optional base-64 encoded object
|
|
|
|
* after that line. Each token has a definition in a table of token_rule_t
|
|
|
|
* entries that describes how many arguments it can take, whether it takes an
|
|
|
|
* object, how many times it may appear, whether it must appear first, and so
|
|
|
|
* on.
|
|
|
|
*
|
|
|
|
* The tokenizer function tokenize_string() converts its string input into a
|
|
|
|
* smartlist full of instances of directory_token_t, according to a provided
|
|
|
|
* table of token_rule_t.
|
|
|
|
*
|
|
|
|
* The generic parts of this module additionally include functions for
|
|
|
|
* finding the start and end of signed information inside a signed object, and
|
|
|
|
* computing the digest that will be signed.
|
|
|
|
*
|
|
|
|
* There are also functions for saving objects to disk that have caused
|
|
|
|
* parsing to fail.
|
|
|
|
*
|
|
|
|
* The specific parts of this module describe conversions between
|
|
|
|
* particular lists of directory_token_t and particular objects. The
|
|
|
|
* kinds of objects that can be parsed here are:
|
|
|
|
* <ul>
|
|
|
|
* <li>router descriptors (managed from routerlist.c)
|
|
|
|
* <li>extra-info documents (managed from routerlist.c)
|
|
|
|
* <li>microdescriptors (managed from microdesc.c)
|
|
|
|
* <li>vote and consensus networkstatus documents, and the routerstatus_t
|
|
|
|
* objects that they comprise (managed from networkstatus.c)
|
|
|
|
* <li>detached-signature objects used by authorities for gathering
|
|
|
|
* signatures on the networkstatus consensus (managed from dirvote.c)
|
|
|
|
* <li>authority key certificates (managed from routerlist.c)
|
|
|
|
* <li>hidden service descriptors (managed from rendcommon.c and rendcache.c)
|
|
|
|
* </ul>
|
2004-05-10 19:30:51 +02:00
|
|
|
**/
|
|
|
|
|
2018-10-01 18:08:09 +02:00
|
|
|
#define EXPOSE_ROUTERDESC_TOKEN_TABLE
|
2015-01-29 15:57:00 +01:00
|
|
|
|
2018-07-05 22:34:59 +02:00
|
|
|
#include "core/or/or.h"
|
|
|
|
#include "app/config/config.h"
|
2018-09-25 23:57:58 +02:00
|
|
|
#include "core/or/policies.h"
|
2018-10-01 18:22:47 +02:00
|
|
|
#include "core/or/versions.h"
|
2018-10-01 00:53:58 +02:00
|
|
|
#include "feature/dirparse/parsecommon.h"
|
2018-10-01 18:08:09 +02:00
|
|
|
#include "feature/dirparse/policy_parse.h"
|
2018-10-01 00:53:58 +02:00
|
|
|
#include "feature/dirparse/routerparse.h"
|
2018-10-01 18:17:33 +02:00
|
|
|
#include "feature/dirparse/sigcommon.h"
|
|
|
|
#include "feature/dirparse/unparseable.h"
|
2018-09-25 23:57:58 +02:00
|
|
|
#include "feature/nodelist/describe.h"
|
|
|
|
#include "feature/nodelist/nickname.h"
|
|
|
|
#include "feature/nodelist/routerinfo.h"
|
2018-07-05 22:34:59 +02:00
|
|
|
#include "feature/nodelist/routerlist.h"
|
|
|
|
#include "feature/nodelist/torcert.h"
|
2018-09-25 23:57:58 +02:00
|
|
|
#include "feature/relay/router.h"
|
2018-10-01 18:17:33 +02:00
|
|
|
#include "lib/crypt_ops/crypto_curve25519.h"
|
|
|
|
#include "lib/crypt_ops/crypto_ed25519.h"
|
2018-09-25 23:57:58 +02:00
|
|
|
#include "lib/crypt_ops/crypto_format.h"
|
|
|
|
#include "lib/memarea/memarea.h"
|
|
|
|
#include "lib/sandbox/sandbox.h"
|
2018-04-05 20:27:30 +02:00
|
|
|
|
2018-07-05 22:34:59 +02:00
|
|
|
#include "core/or/addr_policy_st.h"
|
|
|
|
#include "feature/nodelist/extrainfo_st.h"
|
|
|
|
#include "feature/nodelist/routerinfo_st.h"
|
|
|
|
#include "feature/nodelist/routerlist_st.h"
|
2018-06-15 18:18:17 +02:00
|
|
|
|
2004-05-10 19:30:51 +02:00
|
|
|
/****************************************************************************/
|
|
|
|
|
2012-06-05 06:17:54 +02:00
|
|
|
/** List of tokens recognized in router descriptors.
 *
 * Each entry maps a keyword (as it appears at the start of a line in a
 * descriptor) to its token type, an argument-count rule, and an object
 * requirement.  The T*/A* rule macros and the ARGS/GE/EQ/NO_OBJ/NEED_OBJ/
 * NEED_KEY_1024/OBJ_OK constants come from feature/dirparse/parsecommon.h;
 * presumably T1 means "exactly once", T01 "at most once", T0N "any number
 * of times", and T1_START/T1_END pin the first/last keyword -- confirm
 * against parsecommon.h. */
const token_rule_t routerdesc_token_table[] = {
  /* Exit-policy lines: may appear any number of times. */
  T0N("reject", K_REJECT, ARGS, NO_OBJ ),
  T0N("accept", K_ACCEPT, ARGS, NO_OBJ ),
  T0N("reject6", K_REJECT6, ARGS, NO_OBJ ),
  T0N("accept6", K_ACCEPT6, ARGS, NO_OBJ ),
  /* "router" must open the descriptor; takes at least 5 arguments. */
  T1_START( "router", K_ROUTER, GE(5), NO_OBJ ),
  T01("ipv6-policy", K_IPV6_POLICY, CONCAT_ARGS, NO_OBJ),
  /* RSA keys are carried as 1024-bit key objects. */
  T1( "signing-key", K_SIGNING_KEY, NO_ARGS, NEED_KEY_1024 ),
  T1( "onion-key", K_ONION_KEY, NO_ARGS, NEED_KEY_1024 ),
  T01("ntor-onion-key", K_ONION_KEY_NTOR, GE(1), NO_OBJ ),
  /* "router-signature" must close the descriptor and carry an object. */
  T1_END( "router-signature", K_ROUTER_SIGNATURE, NO_ARGS, NEED_OBJ ),
  T1( "published", K_PUBLISHED, CONCAT_ARGS, NO_OBJ ),
  T01("uptime", K_UPTIME, GE(1), NO_OBJ ),
  T01("fingerprint", K_FINGERPRINT, CONCAT_ARGS, NO_OBJ ),
  T01("hibernating", K_HIBERNATING, GE(1), NO_OBJ ),
  T01("platform", K_PLATFORM, CONCAT_ARGS, NO_OBJ ),
  T01("proto", K_PROTO, CONCAT_ARGS, NO_OBJ ),
  T01("contact", K_CONTACT, CONCAT_ARGS, NO_OBJ ),
  T01("read-history", K_READ_HISTORY, ARGS, NO_OBJ ),
  T01("write-history", K_WRITE_HISTORY, ARGS, NO_OBJ ),
  T01("extra-info-digest", K_EXTRA_INFO_DIGEST, GE(1), NO_OBJ ),
  T01("hidden-service-dir", K_HIDDEN_SERVICE_DIR, NO_ARGS, NO_OBJ ),
  /* Ed25519 identity/signature material (objects or base64 args). */
  T01("identity-ed25519", K_IDENTITY_ED25519, NO_ARGS, NEED_OBJ ),
  T01("master-key-ed25519", K_MASTER_KEY_ED25519, GE(1), NO_OBJ ),
  T01("router-sig-ed25519", K_ROUTER_SIG_ED25519, GE(1), NO_OBJ ),
  T01("onion-key-crosscert", K_ONION_KEY_CROSSCERT, NO_ARGS, NEED_OBJ ),
  T01("ntor-onion-key-crosscert", K_NTOR_ONION_KEY_CROSSCERT,
                                  EQ(1), NEED_OBJ ),
  T01("allow-single-hop-exits",K_ALLOW_SINGLE_HOP_EXITS, NO_ARGS, NO_OBJ ),
  T01("family", K_FAMILY, ARGS, NO_OBJ ),
  T01("caches-extra-info", K_CACHES_EXTRA_INFO, NO_ARGS, NO_OBJ ),
  T0N("or-address", K_OR_ADDRESS, GE(1), NO_OBJ ),
  /* Legacy "opt" prefix: may wrap any keyword, object allowed. */
  T0N("opt", K_OPT, CONCAT_ARGS, OBJ_OK ),
  T1( "bandwidth", K_BANDWIDTH, GE(3), NO_OBJ ),
  /* "@purpose" is an annotation keyword (A01), not part of the signed
   * descriptor body. */
  A01("@purpose", A_PURPOSE, GE(1), NO_OBJ ),
  T01("tunnelled-dir-server",K_DIR_TUNNELLED, NO_ARGS, NO_OBJ ),

  END_OF_TABLE
};
|
|
|
|
|
2012-06-05 06:17:54 +02:00
|
|
|
/** List of tokens recognized in extra-info documents.
 *
 * Same table format as routerdesc_token_table above: keyword, token type,
 * argument rule, object rule (macros from feature/dirparse/parsecommon.h).
 * Most entries are optional (T01) statistics lines reported by relays. */
static token_rule_t extrainfo_token_table[] = {
  /* "router-signature" must be the last keyword and carry an object. */
  T1_END( "router-signature", K_ROUTER_SIGNATURE, NO_ARGS, NEED_OBJ ),
  T1( "published", K_PUBLISHED, CONCAT_ARGS, NO_OBJ ),
  /* Ed25519 identity/signature material. */
  T01("identity-ed25519", K_IDENTITY_ED25519, NO_ARGS, NEED_OBJ ),
  T01("router-sig-ed25519", K_ROUTER_SIG_ED25519, GE(1), NO_OBJ ),
  /* Legacy "opt" prefix. */
  T0N("opt", K_OPT, CONCAT_ARGS, OBJ_OK ),
  T01("read-history", K_READ_HISTORY, ARGS, NO_OBJ ),
  T01("write-history", K_WRITE_HISTORY, ARGS, NO_OBJ ),
  /* Directory-request statistics. */
  T01("dirreq-stats-end", K_DIRREQ_END, ARGS, NO_OBJ ),
  T01("dirreq-v2-ips", K_DIRREQ_V2_IPS, ARGS, NO_OBJ ),
  T01("dirreq-v3-ips", K_DIRREQ_V3_IPS, ARGS, NO_OBJ ),
  T01("dirreq-v2-reqs", K_DIRREQ_V2_REQS, ARGS, NO_OBJ ),
  T01("dirreq-v3-reqs", K_DIRREQ_V3_REQS, ARGS, NO_OBJ ),
  T01("dirreq-v2-share", K_DIRREQ_V2_SHARE, ARGS, NO_OBJ ),
  T01("dirreq-v3-share", K_DIRREQ_V3_SHARE, ARGS, NO_OBJ ),
  T01("dirreq-v2-resp", K_DIRREQ_V2_RESP, ARGS, NO_OBJ ),
  T01("dirreq-v3-resp", K_DIRREQ_V3_RESP, ARGS, NO_OBJ ),
  T01("dirreq-v2-direct-dl", K_DIRREQ_V2_DIR, ARGS, NO_OBJ ),
  T01("dirreq-v3-direct-dl", K_DIRREQ_V3_DIR, ARGS, NO_OBJ ),
  T01("dirreq-v2-tunneled-dl", K_DIRREQ_V2_TUN, ARGS, NO_OBJ ),
  T01("dirreq-v3-tunneled-dl", K_DIRREQ_V3_TUN, ARGS, NO_OBJ ),
  /* Entry-guard statistics. */
  T01("entry-stats-end", K_ENTRY_END, ARGS, NO_OBJ ),
  T01("entry-ips", K_ENTRY_IPS, ARGS, NO_OBJ ),
  /* Cell statistics. */
  T01("cell-stats-end", K_CELL_END, ARGS, NO_OBJ ),
  T01("cell-processed-cells", K_CELL_PROCESSED, ARGS, NO_OBJ ),
  T01("cell-queued-cells", K_CELL_QUEUED, ARGS, NO_OBJ ),
  T01("cell-time-in-queue", K_CELL_TIME, ARGS, NO_OBJ ),
  T01("cell-circuits-per-decile", K_CELL_CIRCS, ARGS, NO_OBJ ),
  /* Exit-traffic statistics. */
  T01("exit-stats-end", K_EXIT_END, ARGS, NO_OBJ ),
  T01("exit-kibibytes-written", K_EXIT_WRITTEN, ARGS, NO_OBJ ),
  T01("exit-kibibytes-read", K_EXIT_READ, ARGS, NO_OBJ ),
  T01("exit-streams-opened", K_EXIT_OPENED, ARGS, NO_OBJ ),

  /* "extra-info" must open the document; takes at least 2 arguments. */
  T1_START( "extra-info", K_EXTRA_INFO, GE(2), NO_OBJ ),

  END_OF_TABLE
};
|
|
|
|
|
|
|
|
#undef T
|
|
|
|
|
2004-05-10 19:30:51 +02:00
|
|
|
/* static function prototypes */
|
|
|
|
static int router_add_exit_policy(routerinfo_t *router,directory_token_t *tok);
|
|
|
|
static smartlist_t *find_all_exitpolicy(smartlist_t *s);
|
2016-03-11 19:26:04 +01:00
|
|
|
|
2004-05-10 19:30:51 +02:00
|
|
|
/** Set <b>digest</b> to the SHA-1 digest of the hash of the first router in
|
2005-09-13 23:14:55 +02:00
|
|
|
* <b>s</b>. Return 0 on success, -1 on failure.
|
2004-05-10 19:30:51 +02:00
|
|
|
*/
|
2005-06-11 20:52:12 +02:00
|
|
|
int
|
2010-02-25 10:31:36 +01:00
|
|
|
router_get_router_hash(const char *s, size_t s_len, char *digest)
|
2004-05-10 19:30:51 +02:00
|
|
|
{
|
2010-02-25 10:31:36 +01:00
|
|
|
return router_get_hash_impl(s, s_len, digest,
|
2009-08-24 18:51:33 +02:00
|
|
|
"router ","\nrouter-signature", '\n',
|
|
|
|
DIGEST_SHA1);
|
2004-05-10 19:30:51 +02:00
|
|
|
}
|
|
|
|
|
2012-05-10 23:27:16 +02:00
|
|
|
/** Set <b>digest</b> to the SHA-1 digest of the hash of the <b>s_len</b>-byte
|
|
|
|
* extrainfo string at <b>s</b>. Return 0 on success, -1 on failure. */
|
2007-04-16 06:17:58 +02:00
|
|
|
int
|
2012-05-10 23:27:16 +02:00
|
|
|
router_get_extrainfo_hash(const char *s, size_t s_len, char *digest)
|
2007-04-16 06:17:58 +02:00
|
|
|
{
|
2012-05-10 23:27:16 +02:00
|
|
|
return router_get_hash_impl(s, s_len, digest, "extra-info",
|
2010-02-27 21:34:02 +01:00
|
|
|
"\nrouter-signature",'\n', DIGEST_SHA1);
|
2007-04-16 06:17:58 +02:00
|
|
|
}
|
|
|
|
|
2007-10-04 18:21:58 +02:00
|
|
|
/** Helper: move *<b>s_ptr</b> ahead to the next router, the next extra-info,
 * or to the first of the annotations preceding the next router or
 * extra-info---whichever comes first.  Set <b>is_extrainfo_out</b> to true if
 * we found an extrainfo, or false if we found a router.  Do not scan beyond
 * <b>eos</b>.  Return -1 if we found nothing; 0 if we found something. */
static int
find_start_of_next_router_or_extrainfo(const char **s_ptr,
                                       const char *eos,
                                       int *is_extrainfo_out)
{
  const char *annotations = NULL;
  const char *s = *s_ptr;

  s = eat_whitespace_eos(s, eos);

  while (s < eos-32) { /* 32 gives enough room for the first keyword. */
    /* We're at the start of a line. */
    tor_assert(*s != '\n');

    if (*s == '@' && !annotations) {
      /* Remember where the first annotation line began, so that the
       * caller gets the annotations along with the entry they precede. */
      annotations = s;
    } else if (*s == 'r' && !strcmpstart(s, "router ")) {
      /* Found a router descriptor; report its start (or that of its
       * annotations, if any). */
      *s_ptr = annotations ? annotations : s;
      *is_extrainfo_out = 0;
      return 0;
    } else if (*s == 'e' && !strcmpstart(s, "extra-info ")) {
      /* Found an extra-info document; same annotation handling. */
      *s_ptr = annotations ? annotations : s;
      *is_extrainfo_out = 1;
      return 0;
    }

    /* Skip to just past the next newline, then past any further
     * whitespace, so the loop resumes at the start of a line. */
    if (!(s = memchr(s+1, '\n', eos-(s+1))))
      break;
    s = eat_whitespace_eos(s, eos);
  }
  return -1;
}
|
|
|
|
|
2004-07-17 01:05:40 +02:00
|
|
|
/** Given a string *<b>s</b> containing a concatenated sequence of router
 * descriptors (or extra-info documents if <b>want_extrainfo</b> is set),
 * parses them and stores the result in <b>dest</b>.  All routers are marked
 * running and valid.  Advances *s to a point immediately following the last
 * router entry.  Ignore any trailing router entries that are not complete.
 *
 * If <b>saved_location</b> isn't SAVED_IN_CACHE, make a local copy of each
 * descriptor in the signed_descriptor_body field of each routerinfo_t.  If it
 * isn't SAVED_NOWHERE, remember the offset of each descriptor.
 *
 * Returns 0 on success and -1 on failure.  Adds a digest to
 * <b>invalid_digests_out</b> for every entry that was unparseable or
 * invalid. (This may cause duplicate entries.)
 *
 * NOTE(review): as written, the only return statement in this function is
 * "return 0" -- the documented -1 failure return never occurs here;
 * confirm against callers before relying on it.
 */
int
router_parse_list_from_string(const char **s, const char *eos,
                              smartlist_t *dest,
                              saved_location_t saved_location,
                              int want_extrainfo,
                              int allow_annotations,
                              const char *prepend_annotations,
                              smartlist_t *invalid_digests_out)
{
  routerinfo_t *router;
  extrainfo_t *extrainfo;
  signed_descriptor_t *signed_desc = NULL;
  void *elt;
  const char *end, *start;
  int have_extrainfo;

  tor_assert(s);
  tor_assert(*s);
  tor_assert(dest);

  start = *s;
  if (!eos)
    eos = *s + strlen(*s);

  tor_assert(eos >= *s);

  while (1) {
    char raw_digest[DIGEST_LEN];
    int have_raw_digest = 0;
    int dl_again = 0;
    /* Advance *s to the next router/extra-info entry; stop when none
     * remain. */
    if (find_start_of_next_router_or_extrainfo(s, eos, &have_extrainfo) < 0)
      break;

    /* Find the end of this entry: the "-----END SIGNATURE-----" line
     * following the next "router-signature" keyword.  If either is
     * missing, the trailing entry is incomplete and we stop. */
    end = tor_memstr(*s, eos-*s, "\nrouter-signature");
    if (end)
      end = tor_memstr(end, eos-end, "\n-----END SIGNATURE-----\n");
    if (end)
      end += strlen("\n-----END SIGNATURE-----\n");

    if (!end)
      break;

    elt = NULL;

    if (have_extrainfo && want_extrainfo) {
      routerlist_t *rl = router_get_routerlist();
      /* Remember the digest even if parsing fails, so the caller can mark
       * the entry as invalid rather than retrying the download. */
      have_raw_digest = router_get_extrainfo_hash(*s, end-*s, raw_digest) == 0;
      extrainfo = extrainfo_parse_entry_from_string(*s, end,
                                       saved_location != SAVED_IN_CACHE,
                                       rl->identity_map, &dl_again);
      if (extrainfo) {
        signed_desc = &extrainfo->cache_info;
        elt = extrainfo;
      }
    } else if (!have_extrainfo && !want_extrainfo) {
      have_raw_digest = router_get_router_hash(*s, end-*s, raw_digest) == 0;
      router = router_parse_entry_from_string(*s, end,
                                              saved_location != SAVED_IN_CACHE,
                                              allow_annotations,
                                              prepend_annotations, &dl_again);
      if (router) {
        log_debug(LD_DIR, "Read router '%s', purpose '%s'",
                  router_describe(router),
                  router_purpose_to_string(router->purpose));
        signed_desc = &router->cache_info;
        elt = router;
      }
    }
    /* Parsing failed for a reason covered by the digest (dl_again is false):
     * report the digest so the entry can be marked undownloadable. */
    if (! elt && ! dl_again && have_raw_digest && invalid_digests_out) {
      smartlist_add(invalid_digests_out, tor_memdup(raw_digest, DIGEST_LEN));
    }
    if (!elt) {
      /* Skip past the unparseable entry and keep going. */
      *s = end;
      continue;
    }
    if (saved_location != SAVED_NOWHERE) {
      tor_assert(signed_desc);
      signed_desc->saved_location = saved_location;
      /* Offset of this descriptor from the start of the input. */
      signed_desc->saved_offset = *s - start;
    }
    *s = end;
    smartlist_add(dest, elt);
  }

  return 0;
}
|
|
|
|
|
2012-08-14 14:03:58 +02:00
|
|
|
/** Try to find an IPv6 OR port in <b>list</b> of directory_token_t's
 * with at least one argument (use GE(1) in setup).  If found, store
 * address and port number to <b>addr_out</b> and
 * <b>port_out</b>.  Return number of OR ports found: as written this is
 * 0 or 1, since we stop at the first usable address. */
int
find_single_ipv6_orport(const smartlist_t *list,
                        tor_addr_t *addr_out,
                        uint16_t *port_out)
{
  int ret = 0;
  tor_assert(list != NULL);
  tor_assert(addr_out != NULL);
  tor_assert(port_out != NULL);

  SMARTLIST_FOREACH_BEGIN(list, directory_token_t *, t) {
    tor_addr_t a;
    maskbits_t bits;
    uint16_t port_min, port_max;
    tor_assert(t->n_args >= 1);
    /* XXXX Prop186 the full spec allows much more than this. */
    /* Accept only a single IPv6 host address (mask of /128) with a single
     * port (port_min == port_max). */
    if (tor_addr_parse_mask_ports(t->args[0], 0,
                                  &a, &bits, &port_min,
                                  &port_max) == AF_INET6 &&
        bits == 128 &&
        port_min == port_max) {
      /* Okay, this is one we can understand. Use it and ignore
         any further addresses in the list. */
      tor_addr_copy(addr_out, &a);
      *port_out = port_min;
      ret = 1;
      break;
    }
  } SMARTLIST_FOREACH_END(t);

  return ret;
}
|
|
|
|
|
2004-05-10 19:30:51 +02:00
|
|
|
/** Helper function: reads a single router entry from *<b>s</b> ...
|
|
|
|
* *<b>end</b>. Mallocs a new router and returns it if all goes well, else
|
2007-01-22 08:51:06 +01:00
|
|
|
* returns NULL. If <b>cache_copy</b> is true, duplicate the contents of
|
|
|
|
* s through end into the signed_descriptor_body of the resulting
|
|
|
|
* routerinfo_t.
|
2007-10-04 18:21:58 +02:00
|
|
|
*
|
2012-03-30 16:58:32 +02:00
|
|
|
* If <b>end</b> is NULL, <b>s</b> must be properly NUL-terminated.
|
2010-02-25 10:31:36 +01:00
|
|
|
*
|
2007-10-04 18:21:58 +02:00
|
|
|
* If <b>allow_annotations</b>, it's okay to encounter annotations in <b>s</b>
|
|
|
|
* before the router; if it's false, reject the router if it's annotated. If
|
|
|
|
* <b>prepend_annotations</b> is set, it should contain some annotations:
|
|
|
|
* append them to the front of the router before parsing it, and keep them
|
|
|
|
* around when caching the router.
|
|
|
|
*
|
|
|
|
* Only one of allow_annotations and prepend_annotations may be set.
|
Treat unparseable (micro)descriptors and extrainfos as undownloadable
One pain point in evolving the Tor design and implementing has been
adding code that makes clients reject directory documents that they
previously would have accepted, if those descriptors actually exist.
When this happened, the clients would get the document, reject it,
and then decide to try downloading it again, ad infinitum. This
problem becomes particularly obnoxious with authorities, since if
some authorities accept a descriptor that others don't, the ones
that don't accept it would go crazy trying to re-fetch it over and
over. (See for example ticket #9286.)
This patch tries to solve this problem by tracking, if a descriptor
isn't parseable, what its digest was, and whether it is invalid
because of some flaw that applies to the portion containing the
digest. (This excludes RSA signature problems: RSA signatures
aren't included in the digest. This means that a directory
authority can still put another directory authority into a loop by
mentioning a descriptor, and then serving that descriptor with an
invalid RSA signatures. But that would also make the misbehaving
directory authority get DoSed by the server it's attacking, so it's
not much of an issue.)
We already have a mechanism to mark something undownloadable with
downloadstatus_mark_impossible(); we use that here for
microdescriptors, extrainfos, and router descriptors.
Unit tests to follow in another patch.
Closes ticket #11243.
2014-10-03 16:55:50 +02:00
|
|
|
*
|
|
|
|
* If <b>can_dl_again_out</b> is provided, set *<b>can_dl_again_out</b> to 1
|
|
|
|
* if it's okay to try to download a descriptor with this same digest again,
|
|
|
|
* and 0 if it isn't. (It might not be okay to download it again if part of
|
|
|
|
* the part covered by the digest is invalid.)
|
2004-05-10 19:30:51 +02:00
|
|
|
*/
|
2005-06-11 20:52:12 +02:00
|
|
|
routerinfo_t *
|
2006-06-22 09:01:54 +02:00
|
|
|
router_parse_entry_from_string(const char *s, const char *end,
|
2007-09-27 18:08:10 +02:00
|
|
|
int cache_copy, int allow_annotations,
|
Treat unparseable (micro)descriptors and extrainfos as undownloadable
One pain point in evolving the Tor design and implementing has been
adding code that makes clients reject directory documents that they
previously would have accepted, if those descriptors actually exist.
When this happened, the clients would get the document, reject it,
and then decide to try downloading it again, ad infinitum. This
problem becomes particularly obnoxious with authorities, since if
some authorities accept a descriptor that others don't, the ones
that don't accept it would go crazy trying to re-fetch it over and
over. (See for example ticket #9286.)
This patch tries to solve this problem by tracking, if a descriptor
isn't parseable, what its digest was, and whether it is invalid
because of some flaw that applies to the portion containing the
digest. (This excludes RSA signature problems: RSA signatures
aren't included in the digest. This means that a directory
authority can still put another directory authority into a loop by
mentioning a descriptor, and then serving that descriptor with an
invalid RSA signatures. But that would also make the misbehaving
directory authority get DoSed by the server it's attacking, so it's
not much of an issue.)
We already have a mechanism to mark something undownloadable with
downloadstatus_mark_impossible(); we use that here for
microdescriptors, extrainfos, and router descriptors.
Unit tests to follow in another patch.
Closes ticket #11243.
2014-10-03 16:55:50 +02:00
|
|
|
const char *prepend_annotations,
|
|
|
|
int *can_dl_again_out)
|
2005-06-11 20:52:12 +02:00
|
|
|
{
|
2004-05-10 19:30:51 +02:00
|
|
|
routerinfo_t *router = NULL;
|
|
|
|
char digest[128];
|
|
|
|
smartlist_t *tokens = NULL, *exit_policy_tokens = NULL;
|
|
|
|
directory_token_t *tok;
|
2005-08-16 04:52:27 +02:00
|
|
|
struct in_addr in;
|
2009-08-20 18:45:03 +02:00
|
|
|
const char *start_of_annotations, *cp, *s_dup = s;
|
2007-09-27 22:46:30 +02:00
|
|
|
size_t prepend_len = prepend_annotations ? strlen(prepend_annotations) : 0;
|
2008-02-06 01:54:47 +01:00
|
|
|
int ok = 1;
|
2008-03-26 17:33:33 +01:00
|
|
|
memarea_t *area = NULL;
|
2014-10-01 17:54:07 +02:00
|
|
|
tor_cert_t *ntor_cc_cert = NULL;
|
2014-10-13 20:22:52 +02:00
|
|
|
/* Do not set this to '1' until we have parsed everything that we intend to
|
|
|
|
* parse that's covered by the hash. */
|
Treat unparseable (micro)descriptors and extrainfos as undownloadable
One pain point in evolving the Tor design and implementing has been
adding code that makes clients reject directory documents that they
previously would have accepted, if those descriptors actually exist.
When this happened, the clients would get the document, reject it,
and then decide to try downloading it again, ad infinitum. This
problem becomes particularly obnoxious with authorities, since if
some authorities accept a descriptor that others don't, the ones
that don't accept it would go crazy trying to re-fetch it over and
over. (See for example ticket #9286.)
This patch tries to solve this problem by tracking, if a descriptor
isn't parseable, what its digest was, and whether it is invalid
because of some flaw that applies to the portion containing the
digest. (This excludes RSA signature problems: RSA signatures
aren't included in the digest. This means that a directory
authority can still put another directory authority into a loop by
mentioning a descriptor, and then serving that descriptor with an
invalid RSA signatures. But that would also make the misbehaving
directory authority get DoSed by the server it's attacking, so it's
not much of an issue.)
We already have a mechanism to mark something undownloadable with
downloadstatus_mark_impossible(); we use that here for
microdescriptors, extrainfos, and router descriptors.
Unit tests to follow in another patch.
Closes ticket #11243.
2014-10-03 16:55:50 +02:00
|
|
|
int can_dl_again = 0;
|
2018-08-23 20:05:42 +02:00
|
|
|
crypto_pk_t *rsa_pubkey = NULL;
|
2004-05-10 19:30:51 +02:00
|
|
|
|
2007-09-27 18:08:10 +02:00
|
|
|
tor_assert(!allow_annotations || !prepend_annotations);
|
|
|
|
|
2004-05-10 19:30:51 +02:00
|
|
|
if (!end) {
|
|
|
|
end = s + strlen(s);
|
|
|
|
}
|
|
|
|
|
2005-10-07 20:33:30 +02:00
|
|
|
/* point 'end' to a point immediately after the final newline. */
|
|
|
|
while (end > s+2 && *(end-1) == '\n' && *(end-2) == '\n')
|
|
|
|
--end;
|
|
|
|
|
2008-11-05 21:34:22 +01:00
|
|
|
area = memarea_new();
|
2012-01-18 21:53:30 +01:00
|
|
|
tokens = smartlist_new();
|
2007-09-27 18:08:10 +02:00
|
|
|
if (prepend_annotations) {
|
2008-03-26 17:33:33 +01:00
|
|
|
if (tokenize_string(area,prepend_annotations,NULL,tokens,
|
2007-09-27 18:08:10 +02:00
|
|
|
routerdesc_token_table,TS_NOCHECK)) {
|
2007-12-19 04:11:02 +01:00
|
|
|
log_warn(LD_DIR, "Error tokenizing router descriptor (annotations).");
|
2007-09-27 18:08:10 +02:00
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-09-26 18:19:44 +02:00
|
|
|
start_of_annotations = s;
|
|
|
|
cp = tor_memstr(s, end-s, "\nrouter ");
|
|
|
|
if (!cp) {
|
|
|
|
if (end-s < 7 || strcmpstart(s, "router ")) {
|
|
|
|
log_warn(LD_DIR, "No router keyword found.");
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
s = cp+1;
|
|
|
|
}
|
|
|
|
|
2010-11-02 16:45:14 +01:00
|
|
|
if (start_of_annotations != s) { /* We have annotations */
|
|
|
|
if (allow_annotations) {
|
|
|
|
if (tokenize_string(area,start_of_annotations,s,tokens,
|
|
|
|
routerdesc_token_table,TS_NOCHECK)) {
|
|
|
|
log_warn(LD_DIR, "Error tokenizing router descriptor (annotations).");
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
log_warn(LD_DIR, "Found unexpected annotations on router descriptor not "
|
|
|
|
"loaded from disk. Dropping it.");
|
2007-12-19 04:11:02 +01:00
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-02-25 10:31:36 +01:00
|
|
|
if (router_get_router_hash(s, end - s, digest) < 0) {
|
2006-02-13 11:33:00 +01:00
|
|
|
log_warn(LD_DIR, "Couldn't compute router hash.");
|
2008-01-16 06:27:19 +01:00
|
|
|
goto err;
|
2004-05-10 19:30:51 +02:00
|
|
|
}
|
2007-09-27 18:08:10 +02:00
|
|
|
{
|
|
|
|
int flags = 0;
|
|
|
|
if (allow_annotations)
|
|
|
|
flags |= TS_ANNOTATIONS_OK;
|
|
|
|
if (prepend_annotations)
|
|
|
|
flags |= TS_ANNOTATIONS_OK|TS_NO_NEW_ANNOTATIONS;
|
|
|
|
|
2008-03-26 17:33:33 +01:00
|
|
|
if (tokenize_string(area,s,end,tokens,routerdesc_token_table, flags)) {
|
2007-09-27 18:08:10 +02:00
|
|
|
log_warn(LD_DIR, "Error tokenizing router descriptor.");
|
|
|
|
goto err;
|
|
|
|
}
|
2004-05-10 19:30:51 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if (smartlist_len(tokens) < 2) {
|
2006-02-13 11:33:00 +01:00
|
|
|
log_warn(LD_DIR, "Impossibly short router descriptor.");
|
2004-05-10 19:30:51 +02:00
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2008-12-11 20:40:58 +01:00
|
|
|
tok = find_by_keyword(tokens, K_ROUTER);
|
2014-10-08 15:13:09 +02:00
|
|
|
const int router_token_pos = smartlist_pos(tokens, tok);
|
2007-04-16 18:28:06 +02:00
|
|
|
tor_assert(tok->n_args >= 5);
|
2004-05-10 19:30:51 +02:00
|
|
|
|
|
|
|
router = tor_malloc_zero(sizeof(routerinfo_t));
|
2014-10-01 17:54:07 +02:00
|
|
|
router->cert_expiration_time = TIME_MAX;
|
2007-11-07 18:11:23 +01:00
|
|
|
router->cache_info.routerlist_index = -1;
|
2007-09-27 22:46:30 +02:00
|
|
|
router->cache_info.annotations_len = s-start_of_annotations + prepend_len;
|
2005-11-05 21:15:27 +01:00
|
|
|
router->cache_info.signed_descriptor_len = end-s;
|
2007-09-27 18:08:10 +02:00
|
|
|
if (cache_copy) {
|
|
|
|
size_t len = router->cache_info.signed_descriptor_len +
|
2008-02-20 00:29:45 +01:00
|
|
|
router->cache_info.annotations_len;
|
2016-07-05 20:19:31 +02:00
|
|
|
char *signed_body =
|
2007-09-27 18:08:10 +02:00
|
|
|
router->cache_info.signed_descriptor_body = tor_malloc(len+1);
|
|
|
|
if (prepend_annotations) {
|
2016-07-05 20:19:31 +02:00
|
|
|
memcpy(signed_body, prepend_annotations, prepend_len);
|
|
|
|
signed_body += prepend_len;
|
2007-09-27 18:08:10 +02:00
|
|
|
}
|
2008-02-20 00:57:06 +01:00
|
|
|
/* This assertion will always succeed.
|
|
|
|
* len == signed_desc_len + annotations_len
|
|
|
|
* == end-s + s-start_of_annotations + prepend_len
|
|
|
|
* == end-start_of_annotations + prepend_len
|
|
|
|
* We already wrote prepend_len bytes into the buffer; now we're
|
|
|
|
* writing end-start_of_annotations -NM. */
|
2016-07-05 20:19:31 +02:00
|
|
|
tor_assert(signed_body+(end-start_of_annotations) ==
|
2008-02-20 00:29:45 +01:00
|
|
|
router->cache_info.signed_descriptor_body+len);
|
2016-07-05 20:19:31 +02:00
|
|
|
memcpy(signed_body, start_of_annotations, end-start_of_annotations);
|
2007-09-27 22:46:30 +02:00
|
|
|
router->cache_info.signed_descriptor_body[len] = '\0';
|
|
|
|
tor_assert(strlen(router->cache_info.signed_descriptor_body) == len);
|
2007-09-27 18:08:10 +02:00
|
|
|
}
|
2005-11-05 21:15:27 +01:00
|
|
|
memcpy(router->cache_info.signed_descriptor_digest, digest, DIGEST_LEN);
|
2004-05-10 19:30:51 +02:00
|
|
|
|
2007-04-16 18:28:06 +02:00
|
|
|
router->nickname = tor_strdup(tok->args[0]);
|
|
|
|
if (!is_legal_nickname(router->nickname)) {
|
|
|
|
log_warn(LD_DIR,"Router nickname is invalid");
|
2004-05-10 19:30:51 +02:00
|
|
|
goto err;
|
|
|
|
}
|
2013-02-10 04:07:22 +01:00
|
|
|
if (!tor_inet_aton(tok->args[1], &in)) {
|
2007-12-19 04:11:02 +01:00
|
|
|
log_warn(LD_DIR,"Router address is not an IP address.");
|
2004-05-10 19:30:51 +02:00
|
|
|
goto err;
|
2007-04-16 18:28:06 +02:00
|
|
|
}
|
|
|
|
router->addr = ntohl(in.s_addr);
|
2007-02-24 20:29:42 +01:00
|
|
|
|
2007-04-16 18:28:06 +02:00
|
|
|
router->or_port =
|
2008-02-06 01:54:47 +01:00
|
|
|
(uint16_t) tor_parse_long(tok->args[2],10,0,65535,&ok,NULL);
|
|
|
|
if (!ok) {
|
|
|
|
log_warn(LD_DIR,"Invalid OR port %s", escaped(tok->args[2]));
|
|
|
|
goto err;
|
|
|
|
}
|
2007-04-16 18:28:06 +02:00
|
|
|
router->dir_port =
|
2008-02-06 01:54:47 +01:00
|
|
|
(uint16_t) tor_parse_long(tok->args[4],10,0,65535,&ok,NULL);
|
|
|
|
if (!ok) {
|
|
|
|
log_warn(LD_DIR,"Invalid dir port %s", escaped(tok->args[4]));
|
|
|
|
goto err;
|
|
|
|
}
|
2007-02-24 20:29:42 +01:00
|
|
|
|
2008-12-11 20:40:58 +01:00
|
|
|
tok = find_by_keyword(tokens, K_BANDWIDTH);
|
|
|
|
tor_assert(tok->n_args >= 3);
|
2008-02-22 20:09:45 +01:00
|
|
|
router->bandwidthrate = (int)
|
2008-02-06 01:54:47 +01:00
|
|
|
tor_parse_long(tok->args[0],10,1,INT_MAX,&ok,NULL);
|
2007-02-24 20:29:42 +01:00
|
|
|
|
2008-02-06 01:54:47 +01:00
|
|
|
if (!ok) {
|
2007-04-16 18:28:06 +02:00
|
|
|
log_warn(LD_DIR, "bandwidthrate %s unreadable or 0. Failing.",
|
|
|
|
escaped(tok->args[0]));
|
|
|
|
goto err;
|
2004-05-10 19:30:51 +02:00
|
|
|
}
|
2008-02-22 20:09:45 +01:00
|
|
|
router->bandwidthburst =
|
|
|
|
(int) tor_parse_long(tok->args[1],10,0,INT_MAX,&ok,NULL);
|
2008-02-06 01:54:47 +01:00
|
|
|
if (!ok) {
|
|
|
|
log_warn(LD_DIR, "Invalid bandwidthburst %s", escaped(tok->args[1]));
|
|
|
|
goto err;
|
|
|
|
}
|
2008-02-22 20:09:45 +01:00
|
|
|
router->bandwidthcapacity = (int)
|
2008-02-06 01:54:47 +01:00
|
|
|
tor_parse_long(tok->args[2],10,0,INT_MAX,&ok,NULL);
|
|
|
|
if (!ok) {
|
|
|
|
log_warn(LD_DIR, "Invalid bandwidthcapacity %s", escaped(tok->args[1]));
|
|
|
|
goto err;
|
|
|
|
}
|
2004-05-10 19:30:51 +02:00
|
|
|
|
2008-12-11 20:40:58 +01:00
|
|
|
if ((tok = find_opt_by_keyword(tokens, A_PURPOSE))) {
|
2008-12-11 21:28:50 +01:00
|
|
|
tor_assert(tok->n_args);
|
2007-09-27 22:46:30 +02:00
|
|
|
router->purpose = router_purpose_from_string(tok->args[0]);
|
|
|
|
} else {
|
|
|
|
router->purpose = ROUTER_PURPOSE_GENERAL;
|
|
|
|
}
|
2008-01-10 18:48:40 +01:00
|
|
|
router->cache_info.send_unencrypted =
|
|
|
|
(router->purpose == ROUTER_PURPOSE_GENERAL) ? 1 : 0;
|
2007-09-27 22:46:30 +02:00
|
|
|
|
2008-12-11 20:40:58 +01:00
|
|
|
if ((tok = find_opt_by_keyword(tokens, K_UPTIME))) {
|
2007-04-16 18:28:06 +02:00
|
|
|
tor_assert(tok->n_args >= 1);
|
2008-02-06 01:54:47 +01:00
|
|
|
router->uptime = tor_parse_long(tok->args[0],10,0,LONG_MAX,&ok,NULL);
|
|
|
|
if (!ok) {
|
|
|
|
log_warn(LD_DIR, "Invalid uptime %s", escaped(tok->args[0]));
|
|
|
|
goto err;
|
|
|
|
}
|
2004-08-17 07:29:41 +02:00
|
|
|
}
|
|
|
|
|
2008-12-11 20:40:58 +01:00
|
|
|
if ((tok = find_opt_by_keyword(tokens, K_HIBERNATING))) {
|
2007-04-16 18:28:06 +02:00
|
|
|
tor_assert(tok->n_args >= 1);
|
|
|
|
router->is_hibernating
|
|
|
|
= (tor_parse_long(tok->args[0],10,0,LONG_MAX,NULL,NULL) != 0);
|
2005-08-30 17:04:24 +02:00
|
|
|
}
|
|
|
|
|
2008-12-11 20:40:58 +01:00
|
|
|
tok = find_by_keyword(tokens, K_PUBLISHED);
|
2004-05-10 19:30:51 +02:00
|
|
|
tor_assert(tok->n_args == 1);
|
2005-11-05 21:15:27 +01:00
|
|
|
if (parse_iso_time(tok->args[0], &router->cache_info.published_on) < 0)
|
2004-10-17 00:14:52 +02:00
|
|
|
goto err;
|
2004-05-10 19:30:51 +02:00
|
|
|
|
2008-12-11 20:40:58 +01:00
|
|
|
tok = find_by_keyword(tokens, K_ONION_KEY);
|
2011-05-16 20:44:23 +02:00
|
|
|
if (!crypto_pk_public_exponent_ok(tok->key)) {
|
2011-04-28 23:35:03 +02:00
|
|
|
log_warn(LD_DIR,
|
|
|
|
"Relay's onion key had invalid exponent.");
|
|
|
|
goto err;
|
|
|
|
}
|
2018-12-14 19:28:07 +01:00
|
|
|
router->onion_pkey = tor_memdup(tok->object_body, tok->object_size);
|
|
|
|
router->onion_pkey_len = tok->object_size;
|
2018-08-23 20:05:42 +02:00
|
|
|
crypto_pk_free(tok->key);
|
2004-05-10 19:30:51 +02:00
|
|
|
|
2012-12-04 21:58:18 +01:00
|
|
|
if ((tok = find_opt_by_keyword(tokens, K_ONION_KEY_NTOR))) {
|
2013-01-06 04:53:32 +01:00
|
|
|
curve25519_public_key_t k;
|
2012-12-04 21:58:18 +01:00
|
|
|
tor_assert(tok->n_args >= 1);
|
2013-01-06 04:53:32 +01:00
|
|
|
if (curve25519_public_from_base64(&k, tok->args[0]) < 0) {
|
|
|
|
log_warn(LD_DIR, "Bogus ntor-onion-key in routerinfo");
|
2012-12-04 21:58:18 +01:00
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
router->onion_curve25519_pkey =
|
2013-01-06 04:53:32 +01:00
|
|
|
tor_memdup(&k, sizeof(curve25519_public_key_t));
|
2012-12-04 21:58:18 +01:00
|
|
|
}
|
|
|
|
|
2008-12-11 20:40:58 +01:00
|
|
|
tok = find_by_keyword(tokens, K_SIGNING_KEY);
|
2004-05-10 19:30:51 +02:00
|
|
|
router->identity_pkey = tok->key;
|
|
|
|
tok->key = NULL; /* Prevent free */
|
2005-11-05 21:15:27 +01:00
|
|
|
if (crypto_pk_get_digest(router->identity_pkey,
|
|
|
|
router->cache_info.identity_digest)) {
|
2006-02-13 11:33:00 +01:00
|
|
|
log_warn(LD_DIR, "Couldn't calculate key digest"); goto err;
|
2004-07-01 03:16:59 +02:00
|
|
|
}
|
2004-05-10 19:30:51 +02:00
|
|
|
|
2014-10-01 05:36:47 +02:00
|
|
|
{
|
2015-06-01 17:24:55 +02:00
|
|
|
directory_token_t *ed_sig_tok, *ed_cert_tok, *cc_tap_tok, *cc_ntor_tok,
|
|
|
|
*master_key_tok;
|
2014-10-01 05:36:47 +02:00
|
|
|
ed_sig_tok = find_opt_by_keyword(tokens, K_ROUTER_SIG_ED25519);
|
|
|
|
ed_cert_tok = find_opt_by_keyword(tokens, K_IDENTITY_ED25519);
|
2015-06-01 17:24:55 +02:00
|
|
|
master_key_tok = find_opt_by_keyword(tokens, K_MASTER_KEY_ED25519);
|
2014-10-01 17:54:07 +02:00
|
|
|
cc_tap_tok = find_opt_by_keyword(tokens, K_ONION_KEY_CROSSCERT);
|
|
|
|
cc_ntor_tok = find_opt_by_keyword(tokens, K_NTOR_ONION_KEY_CROSSCERT);
|
|
|
|
int n_ed_toks = !!ed_sig_tok + !!ed_cert_tok +
|
|
|
|
!!cc_tap_tok + !!cc_ntor_tok;
|
|
|
|
if ((n_ed_toks != 0 && n_ed_toks != 4) ||
|
|
|
|
(n_ed_toks == 4 && !router->onion_curve25519_pkey)) {
|
|
|
|
log_warn(LD_DIR, "Router descriptor with only partial ed25519/"
|
|
|
|
"cross-certification support");
|
2014-10-01 05:36:47 +02:00
|
|
|
goto err;
|
|
|
|
}
|
2015-06-01 17:24:55 +02:00
|
|
|
if (master_key_tok && !ed_sig_tok) {
|
|
|
|
log_warn(LD_DIR, "Router descriptor has ed25519 master key but no "
|
|
|
|
"certificate");
|
|
|
|
goto err;
|
|
|
|
}
|
2014-10-01 05:36:47 +02:00
|
|
|
if (ed_sig_tok) {
|
2014-10-01 17:54:07 +02:00
|
|
|
tor_assert(ed_cert_tok && cc_tap_tok && cc_ntor_tok);
|
2014-10-08 15:13:09 +02:00
|
|
|
const int ed_cert_token_pos = smartlist_pos(tokens, ed_cert_tok);
|
|
|
|
if (ed_cert_token_pos == -1 || router_token_pos == -1 ||
|
|
|
|
(ed_cert_token_pos != router_token_pos + 1 &&
|
|
|
|
ed_cert_token_pos != router_token_pos - 1)) {
|
2014-10-01 05:36:47 +02:00
|
|
|
log_warn(LD_DIR, "Ed25519 certificate in wrong position");
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
if (ed_sig_tok != smartlist_get(tokens, smartlist_len(tokens)-2)) {
|
|
|
|
log_warn(LD_DIR, "Ed25519 signature in wrong position");
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
if (strcmp(ed_cert_tok->object_type, "ED25519 CERT")) {
|
|
|
|
log_warn(LD_DIR, "Wrong object type on identity-ed25519 in decriptor");
|
|
|
|
goto err;
|
|
|
|
}
|
2014-10-01 17:54:07 +02:00
|
|
|
if (strcmp(cc_ntor_tok->object_type, "ED25519 CERT")) {
|
|
|
|
log_warn(LD_DIR, "Wrong object type on ntor-onion-key-crosscert "
|
|
|
|
"in decriptor");
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
if (strcmp(cc_tap_tok->object_type, "CROSSCERT")) {
|
|
|
|
log_warn(LD_DIR, "Wrong object type on onion-key-crosscert "
|
|
|
|
"in decriptor");
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
if (strcmp(cc_ntor_tok->args[0], "0") &&
|
|
|
|
strcmp(cc_ntor_tok->args[0], "1")) {
|
|
|
|
log_warn(LD_DIR, "Bad sign bit on ntor-onion-key-crosscert");
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
int ntor_cc_sign_bit = !strcmp(cc_ntor_tok->args[0], "1");
|
2014-10-01 05:36:47 +02:00
|
|
|
|
|
|
|
uint8_t d256[DIGEST256_LEN];
|
|
|
|
const char *signed_start, *signed_end;
|
|
|
|
tor_cert_t *cert = tor_cert_parse(
|
|
|
|
(const uint8_t*)ed_cert_tok->object_body,
|
|
|
|
ed_cert_tok->object_size);
|
|
|
|
if (! cert) {
|
|
|
|
log_warn(LD_DIR, "Couldn't parse ed25519 cert");
|
|
|
|
goto err;
|
|
|
|
}
|
2016-05-18 02:08:03 +02:00
|
|
|
/* makes sure it gets freed. */
|
|
|
|
router->cache_info.signing_key_cert = cert;
|
2015-06-01 17:24:55 +02:00
|
|
|
|
2014-10-01 05:36:47 +02:00
|
|
|
if (cert->cert_type != CERT_TYPE_ID_SIGNING ||
|
|
|
|
! cert->signing_key_included) {
|
|
|
|
log_warn(LD_DIR, "Invalid form for ed25519 cert");
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2015-06-01 17:24:55 +02:00
|
|
|
if (master_key_tok) {
|
|
|
|
/* This token is optional, but if it's present, it must match
|
|
|
|
* the signature in the signing cert, or supplant it. */
|
|
|
|
tor_assert(master_key_tok->n_args >= 1);
|
|
|
|
ed25519_public_key_t pkey;
|
|
|
|
if (ed25519_public_from_base64(&pkey, master_key_tok->args[0])<0) {
|
|
|
|
log_warn(LD_DIR, "Can't parse ed25519 master key");
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (fast_memneq(&cert->signing_key.pubkey,
|
|
|
|
pkey.pubkey, ED25519_PUBKEY_LEN)) {
|
|
|
|
log_warn(LD_DIR, "Ed25519 master key does not match "
|
|
|
|
"key in certificate");
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
}
|
2014-10-01 17:54:07 +02:00
|
|
|
ntor_cc_cert = tor_cert_parse((const uint8_t*)cc_ntor_tok->object_body,
|
|
|
|
cc_ntor_tok->object_size);
|
2015-05-28 16:42:22 +02:00
|
|
|
if (!ntor_cc_cert) {
|
2014-10-01 17:54:07 +02:00
|
|
|
log_warn(LD_DIR, "Couldn't parse ntor-onion-key-crosscert cert");
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
if (ntor_cc_cert->cert_type != CERT_TYPE_ONION_ID ||
|
|
|
|
! ed25519_pubkey_eq(&ntor_cc_cert->signed_key, &cert->signing_key)) {
|
|
|
|
log_warn(LD_DIR, "Invalid contents for ntor-onion-key-crosscert cert");
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
ed25519_public_key_t ntor_cc_pk;
|
|
|
|
if (ed25519_public_key_from_curve25519_public_key(&ntor_cc_pk,
|
|
|
|
router->onion_curve25519_pkey,
|
|
|
|
ntor_cc_sign_bit)<0) {
|
|
|
|
log_warn(LD_DIR, "Error converting onion key to ed25519");
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2014-10-01 05:36:47 +02:00
|
|
|
if (router_get_hash_impl_helper(s, end-s, "router ",
|
|
|
|
"\nrouter-sig-ed25519",
|
2017-05-03 17:48:08 +02:00
|
|
|
' ', LOG_WARN,
|
|
|
|
&signed_start, &signed_end) < 0) {
|
2014-10-01 05:36:47 +02:00
|
|
|
log_warn(LD_DIR, "Can't find ed25519-signed portion of descriptor");
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
crypto_digest_t *d = crypto_digest256_new(DIGEST_SHA256);
|
|
|
|
crypto_digest_add_bytes(d, ED_DESC_SIGNATURE_PREFIX,
|
|
|
|
strlen(ED_DESC_SIGNATURE_PREFIX));
|
|
|
|
crypto_digest_add_bytes(d, signed_start, signed_end-signed_start);
|
|
|
|
crypto_digest_get_digest(d, (char*)d256, sizeof(d256));
|
|
|
|
crypto_digest_free(d);
|
|
|
|
|
2014-10-01 17:54:07 +02:00
|
|
|
ed25519_checkable_t check[3];
|
|
|
|
int check_ok[3];
|
2016-08-30 14:48:50 +02:00
|
|
|
time_t expires = TIME_MAX;
|
|
|
|
if (tor_cert_get_checkable_sig(&check[0], cert, NULL, &expires) < 0) {
|
2014-10-01 05:36:47 +02:00
|
|
|
log_err(LD_BUG, "Couldn't create 'checkable' for cert.");
|
|
|
|
goto err;
|
|
|
|
}
|
2014-10-01 17:54:07 +02:00
|
|
|
if (tor_cert_get_checkable_sig(&check[1],
|
2016-08-30 14:48:50 +02:00
|
|
|
ntor_cc_cert, &ntor_cc_pk, &expires) < 0) {
|
2014-10-01 17:54:07 +02:00
|
|
|
log_err(LD_BUG, "Couldn't create 'checkable' for ntor_cc_cert.");
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ed25519_signature_from_base64(&check[2].signature,
|
2014-10-01 05:36:47 +02:00
|
|
|
ed_sig_tok->args[0])<0) {
|
|
|
|
log_warn(LD_DIR, "Couldn't decode ed25519 signature");
|
|
|
|
goto err;
|
|
|
|
}
|
2014-10-01 17:54:07 +02:00
|
|
|
check[2].pubkey = &cert->signed_key;
|
|
|
|
check[2].msg = d256;
|
|
|
|
check[2].len = DIGEST256_LEN;
|
2014-10-01 05:36:47 +02:00
|
|
|
|
2014-10-01 17:54:07 +02:00
|
|
|
if (ed25519_checksig_batch(check_ok, check, 3) < 0) {
|
|
|
|
log_warn(LD_DIR, "Incorrect ed25519 signature(s)");
|
2014-10-01 05:36:47 +02:00
|
|
|
goto err;
|
|
|
|
}
|
2014-10-01 17:54:07 +02:00
|
|
|
|
2018-08-23 20:05:42 +02:00
|
|
|
rsa_pubkey = router_get_rsa_onion_pkey(router->onion_pkey,
|
|
|
|
router->onion_pkey_len);
|
2014-10-01 17:54:07 +02:00
|
|
|
if (check_tap_onion_key_crosscert(
|
|
|
|
(const uint8_t*)cc_tap_tok->object_body,
|
|
|
|
(int)cc_tap_tok->object_size,
|
2018-08-23 20:05:42 +02:00
|
|
|
rsa_pubkey,
|
2014-10-01 17:54:07 +02:00
|
|
|
&cert->signing_key,
|
|
|
|
(const uint8_t*)router->cache_info.identity_digest)<0) {
|
|
|
|
log_warn(LD_DIR, "Incorrect TAP cross-verification");
|
2014-10-01 05:36:47 +02:00
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2014-10-01 17:54:07 +02:00
|
|
|
/* We check this before adding it to the routerlist. */
|
2016-08-30 14:48:50 +02:00
|
|
|
router->cert_expiration_time = expires;
|
2014-10-01 17:54:07 +02:00
|
|
|
}
|
2004-07-01 03:16:59 +02:00
|
|
|
}
|
2004-05-10 19:30:51 +02:00
|
|
|
|
2008-12-11 20:40:58 +01:00
|
|
|
if ((tok = find_opt_by_keyword(tokens, K_FINGERPRINT))) {
|
2005-11-15 22:24:32 +01:00
|
|
|
/* If there's a fingerprint line, it must match the identity digest. */
|
|
|
|
char d[DIGEST_LEN];
|
2007-04-16 18:28:06 +02:00
|
|
|
tor_assert(tok->n_args == 1);
|
2005-11-15 22:24:32 +01:00
|
|
|
tor_strstrip(tok->args[0], " ");
|
2016-06-17 16:41:45 +02:00
|
|
|
if (base16_decode(d, DIGEST_LEN,
|
|
|
|
tok->args[0], strlen(tok->args[0])) != DIGEST_LEN) {
|
2006-09-30 00:33:40 +02:00
|
|
|
log_warn(LD_DIR, "Couldn't decode router fingerprint %s",
|
2006-03-05 10:50:26 +01:00
|
|
|
escaped(tok->args[0]));
|
2005-11-15 22:24:32 +01:00
|
|
|
goto err;
|
|
|
|
}
|
2011-05-10 22:58:38 +02:00
|
|
|
if (tor_memneq(d,router->cache_info.identity_digest, DIGEST_LEN)) {
|
2006-02-13 11:33:00 +01:00
|
|
|
log_warn(LD_DIR, "Fingerprint '%s' does not match identity digest.",
|
|
|
|
tok->args[0]);
|
2005-11-15 22:24:32 +01:00
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-01-24 19:49:15 +01:00
|
|
|
{
|
|
|
|
const char *version = NULL, *protocols = NULL;
|
|
|
|
if ((tok = find_opt_by_keyword(tokens, K_PLATFORM))) {
|
|
|
|
router->platform = tor_strdup(tok->args[0]);
|
|
|
|
version = tok->args[0];
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((tok = find_opt_by_keyword(tokens, K_PROTO))) {
|
|
|
|
router->protocol_list = tor_strdup(tok->args[0]);
|
|
|
|
protocols = tok->args[0];
|
|
|
|
}
|
2004-05-10 19:30:51 +02:00
|
|
|
|
2018-01-24 19:49:15 +01:00
|
|
|
summarize_protover_flags(&router->pv, protocols, version);
|
2016-08-19 20:10:20 +02:00
|
|
|
}
|
|
|
|
|
2008-12-11 20:40:58 +01:00
|
|
|
if ((tok = find_opt_by_keyword(tokens, K_CONTACT))) {
|
2005-05-02 23:22:31 +02:00
|
|
|
router->contact_info = tor_strdup(tok->args[0]);
|
|
|
|
}
|
|
|
|
|
2011-03-06 19:31:06 +01:00
|
|
|
if (find_opt_by_keyword(tokens, K_REJECT6) ||
|
|
|
|
find_opt_by_keyword(tokens, K_ACCEPT6)) {
|
|
|
|
log_warn(LD_DIR, "Rejecting router with reject6/accept6 line: they crash "
|
|
|
|
"older Tors.");
|
|
|
|
goto err;
|
|
|
|
}
|
2011-11-08 22:51:30 +01:00
|
|
|
{
|
|
|
|
smartlist_t *or_addresses = find_all_by_keyword(tokens, K_OR_ADDRESS);
|
|
|
|
if (or_addresses) {
|
2012-08-14 14:03:58 +02:00
|
|
|
find_single_ipv6_orport(or_addresses, &router->ipv6_addr,
|
|
|
|
&router->ipv6_orport);
|
2011-11-08 22:51:30 +01:00
|
|
|
smartlist_free(or_addresses);
|
|
|
|
}
|
|
|
|
}
|
2004-05-10 19:30:51 +02:00
|
|
|
exit_policy_tokens = find_all_exitpolicy(tokens);
|
2008-03-17 21:10:57 +01:00
|
|
|
if (!smartlist_len(exit_policy_tokens)) {
|
|
|
|
log_warn(LD_DIR, "No exit policy tokens in descriptor.");
|
|
|
|
goto err;
|
|
|
|
}
|
2004-05-10 19:30:51 +02:00
|
|
|
SMARTLIST_FOREACH(exit_policy_tokens, directory_token_t *, t,
|
|
|
|
if (router_add_exit_policy(router,t)<0) {
|
2006-02-13 11:33:00 +01:00
|
|
|
log_warn(LD_DIR,"Error in exit policy");
|
2005-05-02 23:48:25 +02:00
|
|
|
goto err;
|
|
|
|
});
|
2008-01-02 05:43:44 +01:00
|
|
|
policy_expand_private(&router->exit_policy);
|
2004-05-10 19:30:51 +02:00
|
|
|
|
2012-10-25 03:59:55 +02:00
|
|
|
if ((tok = find_opt_by_keyword(tokens, K_IPV6_POLICY)) && tok->n_args) {
|
|
|
|
router->ipv6_exit_policy = parse_short_policy(tok->args[0]);
|
|
|
|
if (! router->ipv6_exit_policy) {
|
|
|
|
log_warn(LD_DIR , "Error in ipv6-policy %s", escaped(tok->args[0]));
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-10-31 20:05:56 +01:00
|
|
|
if (policy_is_reject_star(router->exit_policy, AF_INET, 1) &&
|
2012-11-15 02:51:41 +01:00
|
|
|
(!router->ipv6_exit_policy ||
|
|
|
|
short_policy_is_reject_star(router->ipv6_exit_policy)))
|
|
|
|
router->policy_is_reject_star = 1;
|
|
|
|
|
2008-12-11 20:40:58 +01:00
|
|
|
if ((tok = find_opt_by_keyword(tokens, K_FAMILY)) && tok->n_args) {
|
2004-10-15 03:58:11 +02:00
|
|
|
int i;
|
2012-01-18 21:53:30 +01:00
|
|
|
router->declared_family = smartlist_new();
|
2004-10-15 03:58:11 +02:00
|
|
|
for (i=0;i<tok->n_args;++i) {
|
|
|
|
if (!is_legal_nickname_or_hexdigest(tok->args[i])) {
|
2006-03-05 10:50:26 +01:00
|
|
|
log_warn(LD_DIR, "Illegal nickname %s in family line",
|
|
|
|
escaped(tok->args[i]));
|
2004-10-15 03:58:11 +02:00
|
|
|
goto err;
|
|
|
|
}
|
2016-10-27 11:26:06 +02:00
|
|
|
smartlist_add_strdup(router->declared_family, tok->args[i]);
|
2004-10-15 03:58:11 +02:00
|
|
|
}
|
|
|
|
}
|
2004-11-09 21:04:00 +01:00
|
|
|
|
2011-06-08 21:16:11 +02:00
|
|
|
if (find_opt_by_keyword(tokens, K_CACHES_EXTRA_INFO))
|
2007-04-16 06:17:58 +02:00
|
|
|
router->caches_extra_info = 1;
|
|
|
|
|
2011-06-08 21:16:11 +02:00
|
|
|
if (find_opt_by_keyword(tokens, K_ALLOW_SINGLE_HOP_EXITS))
|
2008-09-26 20:58:45 +02:00
|
|
|
router->allow_single_hop_exits = 1;
|
|
|
|
|
2008-12-11 20:40:58 +01:00
|
|
|
if ((tok = find_opt_by_keyword(tokens, K_EXTRA_INFO_DIGEST))) {
|
2007-04-16 18:28:06 +02:00
|
|
|
tor_assert(tok->n_args >= 1);
|
2007-04-16 06:17:58 +02:00
|
|
|
if (strlen(tok->args[0]) == HEX_DIGEST_LEN) {
|
2016-06-17 16:41:45 +02:00
|
|
|
if (base16_decode(router->cache_info.extra_info_digest, DIGEST_LEN,
|
|
|
|
tok->args[0], HEX_DIGEST_LEN) != DIGEST_LEN) {
|
|
|
|
log_warn(LD_DIR,"Invalid extra info digest");
|
|
|
|
}
|
2007-04-16 06:17:58 +02:00
|
|
|
} else {
|
|
|
|
log_warn(LD_DIR, "Invalid extra info digest %s", escaped(tok->args[0]));
|
|
|
|
}
|
2015-05-28 16:42:22 +02:00
|
|
|
|
|
|
|
if (tok->n_args >= 2) {
|
2016-05-18 02:08:03 +02:00
|
|
|
if (digest256_from_base64(router->cache_info.extra_info_digest256,
|
|
|
|
tok->args[1]) < 0) {
|
2015-05-28 16:42:22 +02:00
|
|
|
log_warn(LD_DIR, "Invalid extra info digest256 %s",
|
|
|
|
escaped(tok->args[1]));
|
|
|
|
}
|
|
|
|
}
|
2007-04-16 06:17:58 +02:00
|
|
|
}
|
|
|
|
|
2011-06-08 21:16:11 +02:00
|
|
|
if (find_opt_by_keyword(tokens, K_HIDDEN_SERVICE_DIR)) {
|
2007-10-29 20:10:42 +01:00
|
|
|
router->wants_to_be_hs_dir = 1;
|
|
|
|
}
|
|
|
|
|
2014-10-28 18:12:52 +01:00
|
|
|
/* This router accepts tunnelled directory requests via begindir if it has
|
|
|
|
* an open dirport or it included "tunnelled-dir-server". */
|
|
|
|
if (find_opt_by_keyword(tokens, K_DIR_TUNNELLED) || router->dir_port > 0) {
|
|
|
|
router->supports_tunnelled_dir_requests = 1;
|
|
|
|
}
|
|
|
|
|
2008-12-11 20:40:58 +01:00
|
|
|
tok = find_by_keyword(tokens, K_ROUTER_SIGNATURE);
|
2004-05-10 19:30:51 +02:00
|
|
|
|
2004-11-28 10:05:49 +01:00
|
|
|
if (!router->or_port) {
|
2006-02-13 11:33:00 +01:00
|
|
|
log_warn(LD_DIR,"or_port unreadable or 0. Failing.");
|
2004-05-10 19:30:51 +02:00
|
|
|
goto err;
|
|
|
|
}
|
2007-02-24 20:29:42 +01:00
|
|
|
|
2014-10-13 20:22:52 +02:00
|
|
|
/* We've checked everything that's covered by the hash. */
|
Treat unparseable (micro)descriptors and extrainfos as undownloadable
One pain point in evolving the Tor design and implementing has been
adding code that makes clients reject directory documents that they
previously would have accepted, if those descriptors actually exist.
When this happened, the clients would get the document, reject it,
and then decide to try downloading it again, ad infinitum. This
problem becomes particularly obnoxious with authorities, since if
some authorities accept a descriptor that others don't, the ones
that don't accept it would go crazy trying to re-fetch it over and
over. (See for example ticket #9286.)
This patch tries to solve this problem by tracking, if a descriptor
isn't parseable, what its digest was, and whether it is invalid
because of some flaw that applies to the portion containing the
digest. (This excludes RSA signature problems: RSA signatures
aren't included in the digest. This means that a directory
authority can still put another directory authority into a loop by
mentioning a descriptor, and then serving that descriptor with an
invalid RSA signatures. But that would also make the misbehaving
directory authority get DoSed by the server it's attacking, so it's
not much of an issue.)
We already have a mechanism to mark something undownloadable with
downloadstatus_mark_impossible(); we use that here for
microdescriptors, extrainfos, and router descriptors.
Unit tests to follow in another patch.
Closes ticket #11243.
2014-10-03 16:55:50 +02:00
|
|
|
can_dl_again = 1;
|
|
|
|
if (check_signature_token(digest, DIGEST_LEN, tok, router->identity_pkey, 0,
|
|
|
|
"router descriptor") < 0)
|
|
|
|
goto err;
|
|
|
|
|
2004-05-10 19:30:51 +02:00
|
|
|
if (!router->platform) {
|
|
|
|
router->platform = tor_strdup("<unknown>");
|
|
|
|
}
|
|
|
|
goto done;
|
|
|
|
|
|
|
|
err:
|
2009-08-20 18:45:03 +02:00
|
|
|
dump_desc(s_dup, "router descriptor");
|
2004-05-10 19:30:51 +02:00
|
|
|
routerinfo_free(router);
|
|
|
|
router = NULL;
|
|
|
|
done:
|
2018-08-23 20:05:42 +02:00
|
|
|
crypto_pk_free(rsa_pubkey);
|
2014-10-01 17:54:07 +02:00
|
|
|
tor_cert_free(ntor_cc_cert);
|
2004-05-10 19:30:51 +02:00
|
|
|
if (tokens) {
|
2009-09-28 16:37:01 +02:00
|
|
|
SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
|
2004-05-10 19:30:51 +02:00
|
|
|
smartlist_free(tokens);
|
|
|
|
}
|
2009-12-12 08:07:59 +01:00
|
|
|
smartlist_free(exit_policy_tokens);
|
2008-03-26 18:50:27 +01:00
|
|
|
if (area) {
|
|
|
|
DUMP_AREA(area, "routerinfo");
|
2008-03-26 17:33:33 +01:00
|
|
|
memarea_drop_all(area);
|
2008-03-26 18:50:27 +01:00
|
|
|
}
|
Treat unparseable (micro)descriptors and extrainfos as undownloadable
One pain point in evolving the Tor design and implementing has been
adding code that makes clients reject directory documents that they
previously would have accepted, if those descriptors actually exist.
When this happened, the clients would get the document, reject it,
and then decide to try downloading it again, ad infinitum. This
problem becomes particularly obnoxious with authorities, since if
some authorities accept a descriptor that others don't, the ones
that don't accept it would go crazy trying to re-fetch it over and
over. (See for example ticket #9286.)
This patch tries to solve this problem by tracking, if a descriptor
isn't parseable, what its digest was, and whether it is invalid
because of some flaw that applies to the portion containing the
digest. (This excludes RSA signature problems: RSA signatures
aren't included in the digest. This means that a directory
authority can still put another directory authority into a loop by
mentioning a descriptor, and then serving that descriptor with an
invalid RSA signatures. But that would also make the misbehaving
directory authority get DoSed by the server it's attacking, so it's
not much of an issue.)
We already have a mechanism to mark something undownloadable with
downloadstatus_mark_impossible(); we use that here for
microdescriptors, extrainfos, and router descriptors.
Unit tests to follow in another patch.
Closes ticket #11243.
2014-10-03 16:55:50 +02:00
|
|
|
if (can_dl_again_out)
|
|
|
|
*can_dl_again_out = can_dl_again;
|
2004-05-10 19:30:51 +02:00
|
|
|
return router;
|
|
|
|
}
|
|
|
|
|
2007-05-15 00:51:05 +02:00
|
|
|
/** Parse a single extrainfo entry from the string <b>s</b>, ending at
 * <b>end</b>. (If <b>end</b> is NULL, parse up to the end of <b>s</b>.) If
 * <b>cache_copy</b> is true, make a copy of the extra-info document in the
 * cache_info fields of the result. If <b>routermap</b> is provided, use it
 * as a map from router identity to routerinfo_t when looking up signing keys.
 *
 * If <b>can_dl_again_out</b> is provided, set *<b>can_dl_again_out</b> to 1
 * if it's okay to try to download an extrainfo with this same digest again,
 * and 0 if it isn't. (It might not be okay to download it again if part of
 * the part covered by the digest is invalid.)
 *
 * Returns the newly allocated extrainfo_t on success, or NULL on any
 * parse/validation failure (the partially built object is freed on the
 * error path).
 */
extrainfo_t *
extrainfo_parse_entry_from_string(const char *s, const char *end,
                           int cache_copy, struct digest_ri_map_t *routermap,
                           int *can_dl_again_out)
{
  extrainfo_t *extrainfo = NULL;
  char digest[128];
  smartlist_t *tokens = NULL;
  directory_token_t *tok;
  crypto_pk_t *key = NULL;
  routerinfo_t *router = NULL;
  memarea_t *area = NULL;
  /* Keep the original start pointer so we can dump the raw (unparsed)
   * document if we hit the error path. */
  const char *s_dup = s;
  /* Do not set this to '1' until we have parsed everything that we intend to
   * parse that's covered by the hash. */
  int can_dl_again = 0;

  if (BUG(s == NULL))
    return NULL;

  if (!end) {
    end = s + strlen(s);
  }

  /* point 'end' to a point immediately after the final newline. */
  while (end > s+2 && *(end-1) == '\n' && *(end-2) == '\n')
    --end;

  /* SHA1 digest over the signed portion; this is what the RSA signature
   * below covers, and what callers use to identify the document. */
  if (router_get_extrainfo_hash(s, end-s, digest) < 0) {
    log_warn(LD_DIR, "Couldn't compute router hash.");
    goto err;
  }
  tokens = smartlist_new();
  area = memarea_new();
  if (tokenize_string(area,s,end,tokens,extrainfo_token_table,0)) {
    log_warn(LD_DIR, "Error tokenizing extra-info document.");
    goto err;
  }

  /* Need at least the "extra-info" line and the trailing signature. */
  if (smartlist_len(tokens) < 2) {
    log_warn(LD_DIR, "Impossibly short extra-info document.");
    goto err;
  }

  /* XXXX Accept this in position 1 too, and ed identity in position 0. */
  tok = smartlist_get(tokens,0);
  if (tok->tp != K_EXTRA_INFO) {
    log_warn(LD_DIR,"Entry does not start with \"extra-info\"");
    goto err;
  }

  extrainfo = tor_malloc_zero(sizeof(extrainfo_t));
  extrainfo->cache_info.is_extrainfo = 1;
  if (cache_copy)
    extrainfo->cache_info.signed_descriptor_body = tor_memdup_nulterm(s,end-s);
  extrainfo->cache_info.signed_descriptor_len = end-s;
  memcpy(extrainfo->cache_info.signed_descriptor_digest, digest, DIGEST_LEN);
  /* Also record the SHA256 digest of the whole document. */
  crypto_digest256((char*)extrainfo->digest256, s, end-s, DIGEST_SHA256);

  /* The "extra-info" line carries: nickname, hex identity fingerprint. */
  tor_assert(tok->n_args >= 2);
  if (!is_legal_nickname(tok->args[0])) {
    log_warn(LD_DIR,"Bad nickname %s on \"extra-info\"",escaped(tok->args[0]));
    goto err;
  }
  strlcpy(extrainfo->nickname, tok->args[0], sizeof(extrainfo->nickname));
  if (strlen(tok->args[1]) != HEX_DIGEST_LEN ||
      base16_decode(extrainfo->cache_info.identity_digest, DIGEST_LEN,
                    tok->args[1], HEX_DIGEST_LEN) != DIGEST_LEN) {
    log_warn(LD_DIR,"Invalid fingerprint %s on \"extra-info\"",
             escaped(tok->args[1]));
    goto err;
  }

  tok = find_by_keyword(tokens, K_PUBLISHED);
  if (parse_iso_time(tok->args[0], &extrainfo->cache_info.published_on)) {
    log_warn(LD_DIR,"Invalid published time %s on \"extra-info\"",
             escaped(tok->args[0]));
    goto err;
  }

  /* Optional ed25519 cross-certification: either both the identity cert
   * and the ed signature are present, or neither is. */
  {
    directory_token_t *ed_sig_tok, *ed_cert_tok;
    ed_sig_tok = find_opt_by_keyword(tokens, K_ROUTER_SIG_ED25519);
    ed_cert_tok = find_opt_by_keyword(tokens, K_IDENTITY_ED25519);
    int n_ed_toks = !!ed_sig_tok + !!ed_cert_tok;
    if (n_ed_toks != 0 && n_ed_toks != 2) {
      log_warn(LD_DIR, "Router descriptor with only partial ed25519/"
               "cross-certification support");
      goto err;
    }
    if (ed_sig_tok) {
      tor_assert(ed_cert_tok);
      /* The cert must be the second token, and the ed signature must be
       * the second-to-last token (just before router-signature). */
      const int ed_cert_token_pos = smartlist_pos(tokens, ed_cert_tok);
      if (ed_cert_token_pos != 1) {
        /* Accept this in position 0 XXXX */
        log_warn(LD_DIR, "Ed25519 certificate in wrong position");
        goto err;
      }
      if (ed_sig_tok != smartlist_get(tokens, smartlist_len(tokens)-2)) {
        log_warn(LD_DIR, "Ed25519 signature in wrong position");
        goto err;
      }
      if (strcmp(ed_cert_tok->object_type, "ED25519 CERT")) {
        log_warn(LD_DIR, "Wrong object type on identity-ed25519 in decriptor");
        goto err;
      }

      uint8_t d256[DIGEST256_LEN];
      const char *signed_start, *signed_end;
      tor_cert_t *cert = tor_cert_parse(
                       (const uint8_t*)ed_cert_tok->object_body,
                       ed_cert_tok->object_size);
      if (! cert) {
        log_warn(LD_DIR, "Couldn't parse ed25519 cert");
        goto err;
      }
      /* makes sure it gets freed. */
      extrainfo->cache_info.signing_key_cert = cert;

      if (cert->cert_type != CERT_TYPE_ID_SIGNING ||
          ! cert->signing_key_included) {
        log_warn(LD_DIR, "Invalid form for ed25519 cert");
        goto err;
      }

      /* Find the byte range that the ed25519 signature covers: from
       * "extra-info " through the "\nrouter-sig-ed25519" line. */
      if (router_get_hash_impl_helper(s, end-s, "extra-info ",
                                      "\nrouter-sig-ed25519",
                                      ' ', LOG_WARN,
                                      &signed_start, &signed_end) < 0) {
        log_warn(LD_DIR, "Can't find ed25519-signed portion of extrainfo");
        goto err;
      }
      /* SHA256 over the signature prefix plus the signed range; this is
       * the message the ed25519 signature is checked against. */
      crypto_digest_t *d = crypto_digest256_new(DIGEST_SHA256);
      crypto_digest_add_bytes(d, ED_DESC_SIGNATURE_PREFIX,
        strlen(ED_DESC_SIGNATURE_PREFIX));
      crypto_digest_add_bytes(d, signed_start, signed_end-signed_start);
      crypto_digest_get_digest(d, (char*)d256, sizeof(d256));
      crypto_digest_free(d);

      /* Batch-check two signatures: [0] the cert's own signature, and
       * [1] the document signature made with the cert's signed key. */
      ed25519_checkable_t check[2];
      int check_ok[2];
      if (tor_cert_get_checkable_sig(&check[0], cert, NULL, NULL) < 0) {
        log_err(LD_BUG, "Couldn't create 'checkable' for cert.");
        goto err;
      }

      if (ed25519_signature_from_base64(&check[1].signature,
                                        ed_sig_tok->args[0])<0) {
        log_warn(LD_DIR, "Couldn't decode ed25519 signature");
        goto err;
      }
      check[1].pubkey = &cert->signed_key;
      check[1].msg = d256;
      check[1].len = DIGEST256_LEN;

      if (ed25519_checksig_batch(check_ok, check, 2) < 0) {
        log_warn(LD_DIR, "Incorrect ed25519 signature(s)");
        goto err;
      }
      /* We don't check the certificate expiration time: checking that it
       * matches the cert in the router descriptor is adequate. */
    }
  }

  /* We've checked everything that's covered by the hash. */
  can_dl_again = 1;

  /* If we know the router this extrainfo belongs to, use its identity key
   * to verify the RSA signature right away. */
  if (routermap &&
      (router = digestmap_get((digestmap_t*)routermap,
                              extrainfo->cache_info.identity_digest))) {
    key = router->identity_pkey;
  }

  tok = find_by_keyword(tokens, K_ROUTER_SIGNATURE);
  if (strcmp(tok->object_type, "SIGNATURE") ||
      tok->object_size < 128 || tok->object_size > 512) {
    log_warn(LD_DIR, "Bad object type or length on extra-info signature");
    goto err;
  }

  if (key) {
    if (check_signature_token(digest, DIGEST_LEN, tok, key, 0,
                              "extra-info") < 0)
      goto err;

    if (router)
      extrainfo->cache_info.send_unencrypted =
        router->cache_info.send_unencrypted;
  } else {
    /* No key available yet: stash the raw signature so it can be checked
     * later, once the matching router descriptor is known. */
    extrainfo->pending_sig = tor_memdup(tok->object_body,
                                        tok->object_size);
    extrainfo->pending_sig_len = tok->object_size;
  }

  goto done;
 err:
  dump_desc(s_dup, "extra-info descriptor");
  extrainfo_free(extrainfo);
  extrainfo = NULL;
 done:
  if (tokens) {
    SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
    smartlist_free(tokens);
  }
  if (area) {
    DUMP_AREA(area, "extrainfo");
    memarea_drop_all(area);
  }
  if (can_dl_again_out)
    *can_dl_again_out = can_dl_again;
  return extrainfo;
}
|
|
|
|
|
2005-09-13 23:14:55 +02:00
|
|
|
/** Add an exit policy stored in the token <b>tok</b> to the router info in
|
|
|
|
* <b>router</b>. Return 0 on success, -1 on failure. */
|
2004-11-12 20:39:13 +01:00
|
|
|
static int
|
2005-09-13 23:14:55 +02:00
|
|
|
router_add_exit_policy(routerinfo_t *router, directory_token_t *tok)
|
2004-05-20 04:42:50 +02:00
|
|
|
{
|
2008-01-02 05:43:44 +01:00
|
|
|
addr_policy_t *newe;
|
2015-09-11 07:10:54 +02:00
|
|
|
/* Use the standard interpretation of accept/reject *, an IPv4 wildcard. */
|
2012-10-24 21:03:29 +02:00
|
|
|
newe = router_parse_addr_policy(tok, 0);
|
2004-05-20 04:42:50 +02:00
|
|
|
if (!newe)
|
|
|
|
return -1;
|
2008-01-02 05:43:44 +01:00
|
|
|
if (! router->exit_policy)
|
2012-01-18 21:53:30 +01:00
|
|
|
router->exit_policy = smartlist_new();
|
2008-01-02 05:43:44 +01:00
|
|
|
|
2015-09-11 07:10:54 +02:00
|
|
|
/* Ensure that in descriptors, accept/reject fields are followed by
|
|
|
|
* IPv4 addresses, and accept6/reject6 fields are followed by
|
|
|
|
* IPv6 addresses. Unlike torrcs, descriptor exit policies do not permit
|
|
|
|
* accept/reject followed by IPv6. */
|
2008-07-24 15:44:04 +02:00
|
|
|
if (((tok->tp == K_ACCEPT6 || tok->tp == K_REJECT6) &&
|
|
|
|
tor_addr_family(&newe->addr) == AF_INET)
|
|
|
|
||
|
|
|
|
((tok->tp == K_ACCEPT || tok->tp == K_REJECT) &&
|
|
|
|
tor_addr_family(&newe->addr) == AF_INET6)) {
|
2015-09-11 07:10:54 +02:00
|
|
|
/* There's nothing the user can do about other relays' descriptors,
|
|
|
|
* so we don't provide usage advice here. */
|
2008-07-24 15:44:04 +02:00
|
|
|
log_warn(LD_DIR, "Mismatch between field type and address type in exit "
|
2015-09-15 19:59:30 +02:00
|
|
|
"policy '%s'. Discarding entire router descriptor.",
|
|
|
|
tok->n_args == 1 ? tok->args[0] : "");
|
2008-07-24 15:44:04 +02:00
|
|
|
addr_policy_free(newe);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2008-01-02 05:43:44 +01:00
|
|
|
smartlist_add(router->exit_policy, newe);
|
2004-05-20 04:42:50 +02:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2004-05-10 19:30:51 +02:00
|
|
|
/** Return a newly allocated smartlist of all accept or reject tokens in
|
|
|
|
* <b>s</b>.
|
|
|
|
*/
|
|
|
|
static smartlist_t *
|
|
|
|
find_all_exitpolicy(smartlist_t *s)
|
|
|
|
{
|
2012-01-18 21:53:30 +01:00
|
|
|
smartlist_t *out = smartlist_new();
|
2004-05-10 19:30:51 +02:00
|
|
|
SMARTLIST_FOREACH(s, directory_token_t *, t,
|
2008-07-24 15:44:04 +02:00
|
|
|
if (t->tp == K_ACCEPT || t->tp == K_ACCEPT6 ||
|
|
|
|
t->tp == K_REJECT || t->tp == K_REJECT6)
|
|
|
|
smartlist_add(out,t));
|
2004-05-10 19:30:51 +02:00
|
|
|
return out;
|
|
|
|
}
|
|
|
|
|
2016-06-30 02:39:29 +02:00
|
|
|
/** Called on startup; right now we just handle scanning the unparseable
|
|
|
|
* descriptor dumps, but hang anything else we might need to do in the
|
|
|
|
* future here as well.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
routerparse_init(void)
|
|
|
|
{
|
2016-06-30 11:37:23 +02:00
|
|
|
/*
|
|
|
|
* Check both if the sandbox is active and whether it's configured; no
|
|
|
|
* point in loading all that if we won't be able to use it after the
|
|
|
|
* sandbox becomes active.
|
|
|
|
*/
|
|
|
|
if (!(sandbox_is_active() || get_options()->Sandbox)) {
|
2016-06-30 09:45:55 +02:00
|
|
|
dump_desc_init();
|
|
|
|
}
|
2016-06-30 02:39:29 +02:00
|
|
|
}
|
|
|
|
|
2016-06-18 00:35:58 +02:00
|
|
|
/** Clean up all data structures used by routerparse.c at exit */
void
routerparse_free_all(void)
{
  /* Currently the only module-level state is the unparseable-descriptor
   * dump FIFO; release it. */
  dump_desc_fifo_cleanup();
}
|