diff --git a/src/ext/README b/src/ext/README
index b52c74d627..5501aba758 100644
--- a/src/ext/README
+++ b/src/ext/README
@@ -60,6 +60,11 @@ ed25519/ref10/*
Daniel Bernsten's portable ref10 implementation of ed25519.
Public domain.
+ed25519/donna/*
+
+ Andrew Moon's semi-portable ed25519-donna implementation of
+ ed25519. Public domain.
+
readpassphrase.[ch]
Portable readpassphrase implementation from OpenSSH portable, version
diff --git a/src/ext/ed25519/donna/README.md b/src/ext/ed25519/donna/README.md
new file mode 100644
index 0000000000..e09fc27e31
--- /dev/null
+++ b/src/ext/ed25519/donna/README.md
@@ -0,0 +1,183 @@
+[ed25519](http://ed25519.cr.yp.to/) is an
+[Elliptic Curve Digital Signature Algorithm](http://en.wikipedia.org/wiki/Elliptic_Curve_DSA),
+developed by [Dan Bernstein](http://cr.yp.to/djb.html),
+[Niels Duif](http://www.nielsduif.nl/),
+[Tanja Lange](http://hyperelliptic.org/tanja),
+[Peter Schwabe](http://www.cryptojedi.org/users/peter/),
+and [Bo-Yin Yang](http://www.iis.sinica.edu.tw/pages/byyang/).
+
+This project provides performant, portable 32-bit & 64-bit implementations. All implementations are
+of course constant time in regard to secret data.
+
+#### Performance
+
+SSE2 code and benches have not been updated yet. I will do those next.
+
+Compilers versions are gcc 4.6.3, icc 13.1.1, clang 3.4-1~exp1.
+
+Batch verification time (in parentheses) is the average time per 1 verification in a batch of 64 signatures. Counts are in thousands of cycles.
+
+Note that SSE2 performance may be less impressive on AMD & older CPUs with slower SSE ops!
+
+Visual Studio performance for `ge25519_scalarmult_base_niels` will lag behind a bit until optimized assembler versions of `ge25519_scalarmult_base_choose_niels`
+are made.
+
+##### E5200 @ 2.5ghz, march=core2
+
+
+Implementation | Sign | gcc | icc | clang | Verify | gcc | icc | clang |
+
+ed25519-donna 64bit | | 100k | 110k | 137k | | 327k (144k) | 342k (163k) | 422k (194k) |
+amd64-64-24k | | 102k | | | | 355k (158k) | | |
+ed25519-donna-sse2 64bit | | 108k | 111k | 116k | | 353k (155k) | 345k (154k) | 360k (161k) |
+amd64-51-32k | | 116k | | | | 380k (175k) | | |
+ed25519-donna-sse2 32bit | | 147k | 147k | 156k | | 380k (178k) | 381k (173k) | 430k (192k) |
+ed25519-donna 32bit | | 597k | 335k | 380k | | 1693k (720k) | 1052k (453k) | 1141k (493k) |
+
+
+
+##### E3-1270 @ 3.4ghz, march=corei7-avx
+
+
+Implementation | Sign | gcc | icc | clang | Verify | gcc | icc | clang |
+
+amd64-64-24k | | 68k | | | | 225k (104k) | | |
+ed25519-donna 64bit | | 71k | 75k | 90k | | 226k (105k) | 226k (112k) | 277k (125k) |
+amd64-51-32k | | 72k | | | | 218k (107k) | | |
+ed25519-donna-sse2 64bit | | 79k | 82k | 92k | | 252k (122k) | 259k (124k) | 282k (131k) |
+ed25519-donna-sse2 32bit | | 94k | 95k | 103k | | 296k (146k) | 294k (137k) | 306k (147k) |
+ed25519-donna 32bit | | 525k | 299k | 316k | | 1502k (645k) | 959k (418k) | 954k (416k) |
+
+
+
+#### Compilation
+
+No configuration is needed **if you are compiling against OpenSSL**.
+
+##### Hash Options
+
+If you are not compiling against OpenSSL, you will need a hash function.
+
+To use a simple/**slow** implementation of SHA-512, use `-DED25519_REFHASH` when compiling `ed25519.c`.
+This should never be used except to verify the code works when OpenSSL is not available.
+
+To use a custom hash function, use `-DED25519_CUSTOMHASH` when compiling `ed25519.c` and put your
+custom hash implementation in ed25519-hash-custom.h. The hash must have a 512bit digest and implement
+
+ struct ed25519_hash_context;
+
+ void ed25519_hash_init(ed25519_hash_context *ctx);
+ void ed25519_hash_update(ed25519_hash_context *ctx, const uint8_t *in, size_t inlen);
+ void ed25519_hash_final(ed25519_hash_context *ctx, uint8_t *hash);
+ void ed25519_hash(uint8_t *hash, const uint8_t *in, size_t inlen);
+
+##### Random Options
+
+If you are not compiling against OpenSSL, you will need a random function for batch verification.
+
+To use a custom random function, use `-DED25519_CUSTOMRANDOM` when compiling `ed25519.c` and put your
+custom random implementation in ed25519-randombytes-custom.h. The random function must implement:
+
+ void ED25519_FN(ed25519_randombytes_unsafe) (void *p, size_t len);
+
+Use `-DED25519_TEST` when compiling `ed25519.c` to use a deterministically seeded, non-thread safe CSPRNG
+variant of Bob Jenkins' [ISAAC](http://en.wikipedia.org/wiki/ISAAC_%28cipher%29).
+
+##### Minor options
+
+Use `-DED25519_INLINE_ASM` to disable the use of custom assembler routines and instead rely on portable C.
+
+Use `-DED25519_FORCE_32BIT` to force the use of 32 bit routines even when compiling for 64 bit.
+
+##### 32-bit
+
+ gcc ed25519.c -m32 -O3 -c
+
+##### 64-bit
+
+ gcc ed25519.c -m64 -O3 -c
+
+##### SSE2
+
+ gcc ed25519.c -m32 -O3 -c -DED25519_SSE2 -msse2
+ gcc ed25519.c -m64 -O3 -c -DED25519_SSE2
+
+clang and icc are also supported
+
+
+#### Usage
+
+To use the code, link against `ed25519.o -mbits` and:
+
+ #include "ed25519.h"
+
+Add `-lssl -lcrypto` when using OpenSSL (Some systems don't need -lcrypto? It might be trial and error).
+
+To generate a private key, simply generate 32 bytes from a secure
+cryptographic source:
+
+ ed25519_secret_key sk;
+ randombytes(sk, sizeof(ed25519_secret_key));
+
+To generate a public key:
+
+ ed25519_public_key pk;
+ ed25519_publickey(sk, pk);
+
+To sign a message:
+
+ ed25519_signature sig;
+    ed25519_sign(message, message_len, sk, pk, sig);
+
+To verify a signature:
+
+    int valid = ed25519_sign_open(message, message_len, pk, sig) == 0;
+
+To batch verify signatures:
+
+ const unsigned char *mp[num] = {message1, message2..}
+ size_t ml[num] = {message_len1, message_len2..}
+ const unsigned char *pkp[num] = {pk1, pk2..}
+ const unsigned char *sigp[num] = {signature1, signature2..}
+ int valid[num]
+
+ /* valid[i] will be set to 1 if the individual signature was valid, 0 otherwise */
+ int all_valid = ed25519_sign_open_batch(mp, ml, pkp, sigp, num, valid) == 0;
+
+**Note**: Batch verification uses `ed25519_randombytes_unsafe`, implemented in
+`ed25519-randombytes.h`, to generate random scalars for the verification code.
+The default implementation now uses OpenSSL's `RAND_bytes`.
+
+Unlike the [SUPERCOP](http://bench.cr.yp.to/supercop.html) version, signatures are
+not appended to messages, and there is no need for padding in front of messages.
+Additionally, the secret key does not contain a copy of the public key, so it is
+32 bytes instead of 64 bytes, and the public key must be provided to the signing
+function.
+
+##### Curve25519
+
+Curve25519 public keys can be generated thanks to
+[Adam Langley](http://www.imperialviolet.org/2013/05/10/fastercurve25519.html)
+leveraging Ed25519's precomputed basepoint scalar multiplication.
+
+ curved25519_key sk, pk;
+ randombytes(sk, sizeof(curved25519_key));
+ curved25519_scalarmult_basepoint(pk, sk);
+
+Note the name is curved25519, a combination of curve and ed25519, to prevent
+name clashes. Performance is slightly faster than short message ed25519
+signing due to both using the same code for the scalar multiply.
+
+#### Testing
+
+Fuzzing against reference implementations is now available. See [fuzz/README](fuzz/README.md).
+
+Building `ed25519.c` with `-DED25519_TEST` and linking with `test.c` will run basic sanity tests
+and benchmark each function. `test-batch.c` has been incorporated in to `test.c`.
+
+`test-internals.c` is standalone and built the same way as `ed25519.c`. It tests the math primitives
+with extreme values to ensure they function correctly. SSE2 is now supported.
+
+#### Papers
+
+[Available on the Ed25519 website](http://ed25519.cr.yp.to/papers.html)
\ No newline at end of file
diff --git a/src/ext/ed25519/donna/curve25519-donna-32bit.h b/src/ext/ed25519/donna/curve25519-donna-32bit.h
new file mode 100644
index 0000000000..b0861acf03
--- /dev/null
+++ b/src/ext/ed25519/donna/curve25519-donna-32bit.h
@@ -0,0 +1,579 @@
+/*
+ Public domain by Andrew M.
+ See: https://github.com/floodyberry/curve25519-donna
+
+ 32 bit integer curve25519 implementation
+*/
+
+typedef uint32_t bignum25519[10];
+typedef uint32_t bignum25519align16[12];
+
+static const uint32_t reduce_mask_25 = (1 << 25) - 1;
+static const uint32_t reduce_mask_26 = (1 << 26) - 1;
+
+
+/* out = in */
+DONNA_INLINE static void
+curve25519_copy(bignum25519 out, const bignum25519 in) {
+ out[0] = in[0];
+ out[1] = in[1];
+ out[2] = in[2];
+ out[3] = in[3];
+ out[4] = in[4];
+ out[5] = in[5];
+ out[6] = in[6];
+ out[7] = in[7];
+ out[8] = in[8];
+ out[9] = in[9];
+}
+
+/* out = a + b */
+DONNA_INLINE static void
+curve25519_add(bignum25519 out, const bignum25519 a, const bignum25519 b) {
+ out[0] = a[0] + b[0];
+ out[1] = a[1] + b[1];
+ out[2] = a[2] + b[2];
+ out[3] = a[3] + b[3];
+ out[4] = a[4] + b[4];
+ out[5] = a[5] + b[5];
+ out[6] = a[6] + b[6];
+ out[7] = a[7] + b[7];
+ out[8] = a[8] + b[8];
+ out[9] = a[9] + b[9];
+}
+
+DONNA_INLINE static void
+curve25519_add_after_basic(bignum25519 out, const bignum25519 a, const bignum25519 b) {
+ uint32_t c;
+ out[0] = a[0] + b[0] ; c = (out[0] >> 26); out[0] &= reduce_mask_26;
+ out[1] = a[1] + b[1] + c; c = (out[1] >> 25); out[1] &= reduce_mask_25;
+ out[2] = a[2] + b[2] + c; c = (out[2] >> 26); out[2] &= reduce_mask_26;
+ out[3] = a[3] + b[3] + c; c = (out[3] >> 25); out[3] &= reduce_mask_25;
+ out[4] = a[4] + b[4] + c; c = (out[4] >> 26); out[4] &= reduce_mask_26;
+ out[5] = a[5] + b[5] + c; c = (out[5] >> 25); out[5] &= reduce_mask_25;
+ out[6] = a[6] + b[6] + c; c = (out[6] >> 26); out[6] &= reduce_mask_26;
+ out[7] = a[7] + b[7] + c; c = (out[7] >> 25); out[7] &= reduce_mask_25;
+ out[8] = a[8] + b[8] + c; c = (out[8] >> 26); out[8] &= reduce_mask_26;
+ out[9] = a[9] + b[9] + c; c = (out[9] >> 25); out[9] &= reduce_mask_25;
+ out[0] += 19 * c;
+}
+
+DONNA_INLINE static void
+curve25519_add_reduce(bignum25519 out, const bignum25519 a, const bignum25519 b) {
+ uint32_t c;
+ out[0] = a[0] + b[0] ; c = (out[0] >> 26); out[0] &= reduce_mask_26;
+ out[1] = a[1] + b[1] + c; c = (out[1] >> 25); out[1] &= reduce_mask_25;
+ out[2] = a[2] + b[2] + c; c = (out[2] >> 26); out[2] &= reduce_mask_26;
+ out[3] = a[3] + b[3] + c; c = (out[3] >> 25); out[3] &= reduce_mask_25;
+ out[4] = a[4] + b[4] + c; c = (out[4] >> 26); out[4] &= reduce_mask_26;
+ out[5] = a[5] + b[5] + c; c = (out[5] >> 25); out[5] &= reduce_mask_25;
+ out[6] = a[6] + b[6] + c; c = (out[6] >> 26); out[6] &= reduce_mask_26;
+ out[7] = a[7] + b[7] + c; c = (out[7] >> 25); out[7] &= reduce_mask_25;
+ out[8] = a[8] + b[8] + c; c = (out[8] >> 26); out[8] &= reduce_mask_26;
+ out[9] = a[9] + b[9] + c; c = (out[9] >> 25); out[9] &= reduce_mask_25;
+ out[0] += 19 * c;
+}
+
+/* multiples of p */
+static const uint32_t twoP0 = 0x07ffffda;
+static const uint32_t twoP13579 = 0x03fffffe;
+static const uint32_t twoP2468 = 0x07fffffe;
+static const uint32_t fourP0 = 0x0fffffb4;
+static const uint32_t fourP13579 = 0x07fffffc;
+static const uint32_t fourP2468 = 0x0ffffffc;
+
+/* out = a - b */
+DONNA_INLINE static void
+curve25519_sub(bignum25519 out, const bignum25519 a, const bignum25519 b) {
+ uint32_t c;
+ out[0] = twoP0 + a[0] - b[0] ; c = (out[0] >> 26); out[0] &= reduce_mask_26;
+ out[1] = twoP13579 + a[1] - b[1] + c; c = (out[1] >> 25); out[1] &= reduce_mask_25;
+ out[2] = twoP2468 + a[2] - b[2] + c; c = (out[2] >> 26); out[2] &= reduce_mask_26;
+ out[3] = twoP13579 + a[3] - b[3] + c; c = (out[3] >> 25); out[3] &= reduce_mask_25;
+ out[4] = twoP2468 + a[4] - b[4] + c;
+ out[5] = twoP13579 + a[5] - b[5] ;
+ out[6] = twoP2468 + a[6] - b[6] ;
+ out[7] = twoP13579 + a[7] - b[7] ;
+ out[8] = twoP2468 + a[8] - b[8] ;
+ out[9] = twoP13579 + a[9] - b[9] ;
+}
+
+/* out = a - b, where a is the result of a basic op (add,sub) */
+DONNA_INLINE static void
+curve25519_sub_after_basic(bignum25519 out, const bignum25519 a, const bignum25519 b) {
+ uint32_t c;
+ out[0] = fourP0 + a[0] - b[0] ; c = (out[0] >> 26); out[0] &= reduce_mask_26;
+ out[1] = fourP13579 + a[1] - b[1] + c; c = (out[1] >> 25); out[1] &= reduce_mask_25;
+ out[2] = fourP2468 + a[2] - b[2] + c; c = (out[2] >> 26); out[2] &= reduce_mask_26;
+ out[3] = fourP13579 + a[3] - b[3] + c; c = (out[3] >> 25); out[3] &= reduce_mask_25;
+ out[4] = fourP2468 + a[4] - b[4] + c; c = (out[4] >> 26); out[4] &= reduce_mask_26;
+ out[5] = fourP13579 + a[5] - b[5] + c; c = (out[5] >> 25); out[5] &= reduce_mask_25;
+ out[6] = fourP2468 + a[6] - b[6] + c; c = (out[6] >> 26); out[6] &= reduce_mask_26;
+ out[7] = fourP13579 + a[7] - b[7] + c; c = (out[7] >> 25); out[7] &= reduce_mask_25;
+ out[8] = fourP2468 + a[8] - b[8] + c; c = (out[8] >> 26); out[8] &= reduce_mask_26;
+ out[9] = fourP13579 + a[9] - b[9] + c; c = (out[9] >> 25); out[9] &= reduce_mask_25;
+ out[0] += 19 * c;
+}
+
+DONNA_INLINE static void
+curve25519_sub_reduce(bignum25519 out, const bignum25519 a, const bignum25519 b) {
+ uint32_t c;
+ out[0] = fourP0 + a[0] - b[0] ; c = (out[0] >> 26); out[0] &= reduce_mask_26;
+ out[1] = fourP13579 + a[1] - b[1] + c; c = (out[1] >> 25); out[1] &= reduce_mask_25;
+ out[2] = fourP2468 + a[2] - b[2] + c; c = (out[2] >> 26); out[2] &= reduce_mask_26;
+ out[3] = fourP13579 + a[3] - b[3] + c; c = (out[3] >> 25); out[3] &= reduce_mask_25;
+ out[4] = fourP2468 + a[4] - b[4] + c; c = (out[4] >> 26); out[4] &= reduce_mask_26;
+ out[5] = fourP13579 + a[5] - b[5] + c; c = (out[5] >> 25); out[5] &= reduce_mask_25;
+ out[6] = fourP2468 + a[6] - b[6] + c; c = (out[6] >> 26); out[6] &= reduce_mask_26;
+ out[7] = fourP13579 + a[7] - b[7] + c; c = (out[7] >> 25); out[7] &= reduce_mask_25;
+ out[8] = fourP2468 + a[8] - b[8] + c; c = (out[8] >> 26); out[8] &= reduce_mask_26;
+ out[9] = fourP13579 + a[9] - b[9] + c; c = (out[9] >> 25); out[9] &= reduce_mask_25;
+ out[0] += 19 * c;
+}
+
+/* out = -a */
+DONNA_INLINE static void
+curve25519_neg(bignum25519 out, const bignum25519 a) {
+ uint32_t c;
+ out[0] = twoP0 - a[0] ; c = (out[0] >> 26); out[0] &= reduce_mask_26;
+ out[1] = twoP13579 - a[1] + c; c = (out[1] >> 25); out[1] &= reduce_mask_25;
+ out[2] = twoP2468 - a[2] + c; c = (out[2] >> 26); out[2] &= reduce_mask_26;
+ out[3] = twoP13579 - a[3] + c; c = (out[3] >> 25); out[3] &= reduce_mask_25;
+ out[4] = twoP2468 - a[4] + c; c = (out[4] >> 26); out[4] &= reduce_mask_26;
+ out[5] = twoP13579 - a[5] + c; c = (out[5] >> 25); out[5] &= reduce_mask_25;
+ out[6] = twoP2468 - a[6] + c; c = (out[6] >> 26); out[6] &= reduce_mask_26;
+ out[7] = twoP13579 - a[7] + c; c = (out[7] >> 25); out[7] &= reduce_mask_25;
+ out[8] = twoP2468 - a[8] + c; c = (out[8] >> 26); out[8] &= reduce_mask_26;
+ out[9] = twoP13579 - a[9] + c; c = (out[9] >> 25); out[9] &= reduce_mask_25;
+ out[0] += 19 * c;
+}
+
+/* out = a * b */
+#define curve25519_mul_noinline curve25519_mul
+static void
+curve25519_mul(bignum25519 out, const bignum25519 a, const bignum25519 b) {
+ uint32_t r0,r1,r2,r3,r4,r5,r6,r7,r8,r9;
+ uint32_t s0,s1,s2,s3,s4,s5,s6,s7,s8,s9;
+ uint64_t m0,m1,m2,m3,m4,m5,m6,m7,m8,m9,c;
+ uint32_t p;
+
+ r0 = b[0];
+ r1 = b[1];
+ r2 = b[2];
+ r3 = b[3];
+ r4 = b[4];
+ r5 = b[5];
+ r6 = b[6];
+ r7 = b[7];
+ r8 = b[8];
+ r9 = b[9];
+
+ s0 = a[0];
+ s1 = a[1];
+ s2 = a[2];
+ s3 = a[3];
+ s4 = a[4];
+ s5 = a[5];
+ s6 = a[6];
+ s7 = a[7];
+ s8 = a[8];
+ s9 = a[9];
+
+ m1 = mul32x32_64(r0, s1) + mul32x32_64(r1, s0);
+ m3 = mul32x32_64(r0, s3) + mul32x32_64(r1, s2) + mul32x32_64(r2, s1) + mul32x32_64(r3, s0);
+ m5 = mul32x32_64(r0, s5) + mul32x32_64(r1, s4) + mul32x32_64(r2, s3) + mul32x32_64(r3, s2) + mul32x32_64(r4, s1) + mul32x32_64(r5, s0);
+ m7 = mul32x32_64(r0, s7) + mul32x32_64(r1, s6) + mul32x32_64(r2, s5) + mul32x32_64(r3, s4) + mul32x32_64(r4, s3) + mul32x32_64(r5, s2) + mul32x32_64(r6, s1) + mul32x32_64(r7, s0);
+ m9 = mul32x32_64(r0, s9) + mul32x32_64(r1, s8) + mul32x32_64(r2, s7) + mul32x32_64(r3, s6) + mul32x32_64(r4, s5) + mul32x32_64(r5, s4) + mul32x32_64(r6, s3) + mul32x32_64(r7, s2) + mul32x32_64(r8, s1) + mul32x32_64(r9, s0);
+
+ r1 *= 2;
+ r3 *= 2;
+ r5 *= 2;
+ r7 *= 2;
+
+ m0 = mul32x32_64(r0, s0);
+ m2 = mul32x32_64(r0, s2) + mul32x32_64(r1, s1) + mul32x32_64(r2, s0);
+ m4 = mul32x32_64(r0, s4) + mul32x32_64(r1, s3) + mul32x32_64(r2, s2) + mul32x32_64(r3, s1) + mul32x32_64(r4, s0);
+ m6 = mul32x32_64(r0, s6) + mul32x32_64(r1, s5) + mul32x32_64(r2, s4) + mul32x32_64(r3, s3) + mul32x32_64(r4, s2) + mul32x32_64(r5, s1) + mul32x32_64(r6, s0);
+ m8 = mul32x32_64(r0, s8) + mul32x32_64(r1, s7) + mul32x32_64(r2, s6) + mul32x32_64(r3, s5) + mul32x32_64(r4, s4) + mul32x32_64(r5, s3) + mul32x32_64(r6, s2) + mul32x32_64(r7, s1) + mul32x32_64(r8, s0);
+
+ r1 *= 19;
+ r2 *= 19;
+ r3 = (r3 / 2) * 19;
+ r4 *= 19;
+ r5 = (r5 / 2) * 19;
+ r6 *= 19;
+ r7 = (r7 / 2) * 19;
+ r8 *= 19;
+ r9 *= 19;
+
+ m1 += (mul32x32_64(r9, s2) + mul32x32_64(r8, s3) + mul32x32_64(r7, s4) + mul32x32_64(r6, s5) + mul32x32_64(r5, s6) + mul32x32_64(r4, s7) + mul32x32_64(r3, s8) + mul32x32_64(r2, s9));
+ m3 += (mul32x32_64(r9, s4) + mul32x32_64(r8, s5) + mul32x32_64(r7, s6) + mul32x32_64(r6, s7) + mul32x32_64(r5, s8) + mul32x32_64(r4, s9));
+ m5 += (mul32x32_64(r9, s6) + mul32x32_64(r8, s7) + mul32x32_64(r7, s8) + mul32x32_64(r6, s9));
+ m7 += (mul32x32_64(r9, s8) + mul32x32_64(r8, s9));
+
+ r3 *= 2;
+ r5 *= 2;
+ r7 *= 2;
+ r9 *= 2;
+
+ m0 += (mul32x32_64(r9, s1) + mul32x32_64(r8, s2) + mul32x32_64(r7, s3) + mul32x32_64(r6, s4) + mul32x32_64(r5, s5) + mul32x32_64(r4, s6) + mul32x32_64(r3, s7) + mul32x32_64(r2, s8) + mul32x32_64(r1, s9));
+ m2 += (mul32x32_64(r9, s3) + mul32x32_64(r8, s4) + mul32x32_64(r7, s5) + mul32x32_64(r6, s6) + mul32x32_64(r5, s7) + mul32x32_64(r4, s8) + mul32x32_64(r3, s9));
+ m4 += (mul32x32_64(r9, s5) + mul32x32_64(r8, s6) + mul32x32_64(r7, s7) + mul32x32_64(r6, s8) + mul32x32_64(r5, s9));
+ m6 += (mul32x32_64(r9, s7) + mul32x32_64(r8, s8) + mul32x32_64(r7, s9));
+ m8 += (mul32x32_64(r9, s9));
+
+ r0 = (uint32_t)m0 & reduce_mask_26; c = (m0 >> 26);
+ m1 += c; r1 = (uint32_t)m1 & reduce_mask_25; c = (m1 >> 25);
+ m2 += c; r2 = (uint32_t)m2 & reduce_mask_26; c = (m2 >> 26);
+ m3 += c; r3 = (uint32_t)m3 & reduce_mask_25; c = (m3 >> 25);
+ m4 += c; r4 = (uint32_t)m4 & reduce_mask_26; c = (m4 >> 26);
+ m5 += c; r5 = (uint32_t)m5 & reduce_mask_25; c = (m5 >> 25);
+ m6 += c; r6 = (uint32_t)m6 & reduce_mask_26; c = (m6 >> 26);
+ m7 += c; r7 = (uint32_t)m7 & reduce_mask_25; c = (m7 >> 25);
+ m8 += c; r8 = (uint32_t)m8 & reduce_mask_26; c = (m8 >> 26);
+ m9 += c; r9 = (uint32_t)m9 & reduce_mask_25; p = (uint32_t)(m9 >> 25);
+ m0 = r0 + mul32x32_64(p,19); r0 = (uint32_t)m0 & reduce_mask_26; p = (uint32_t)(m0 >> 26);
+ r1 += p;
+
+ out[0] = r0;
+ out[1] = r1;
+ out[2] = r2;
+ out[3] = r3;
+ out[4] = r4;
+ out[5] = r5;
+ out[6] = r6;
+ out[7] = r7;
+ out[8] = r8;
+ out[9] = r9;
+}
+
+/* out = in*in */
+static void
+curve25519_square(bignum25519 out, const bignum25519 in) {
+ uint32_t r0,r1,r2,r3,r4,r5,r6,r7,r8,r9;
+ uint32_t d6,d7,d8,d9;
+ uint64_t m0,m1,m2,m3,m4,m5,m6,m7,m8,m9,c;
+ uint32_t p;
+
+ r0 = in[0];
+ r1 = in[1];
+ r2 = in[2];
+ r3 = in[3];
+ r4 = in[4];
+ r5 = in[5];
+ r6 = in[6];
+ r7 = in[7];
+ r8 = in[8];
+ r9 = in[9];
+
+ m0 = mul32x32_64(r0, r0);
+ r0 *= 2;
+ m1 = mul32x32_64(r0, r1);
+ m2 = mul32x32_64(r0, r2) + mul32x32_64(r1, r1 * 2);
+ r1 *= 2;
+ m3 = mul32x32_64(r0, r3) + mul32x32_64(r1, r2 );
+ m4 = mul32x32_64(r0, r4) + mul32x32_64(r1, r3 * 2) + mul32x32_64(r2, r2);
+ r2 *= 2;
+ m5 = mul32x32_64(r0, r5) + mul32x32_64(r1, r4 ) + mul32x32_64(r2, r3);
+ m6 = mul32x32_64(r0, r6) + mul32x32_64(r1, r5 * 2) + mul32x32_64(r2, r4) + mul32x32_64(r3, r3 * 2);
+ r3 *= 2;
+ m7 = mul32x32_64(r0, r7) + mul32x32_64(r1, r6 ) + mul32x32_64(r2, r5) + mul32x32_64(r3, r4 );
+ m8 = mul32x32_64(r0, r8) + mul32x32_64(r1, r7 * 2) + mul32x32_64(r2, r6) + mul32x32_64(r3, r5 * 2) + mul32x32_64(r4, r4 );
+ m9 = mul32x32_64(r0, r9) + mul32x32_64(r1, r8 ) + mul32x32_64(r2, r7) + mul32x32_64(r3, r6 ) + mul32x32_64(r4, r5 * 2);
+
+ d6 = r6 * 19;
+ d7 = r7 * 2 * 19;
+ d8 = r8 * 19;
+ d9 = r9 * 2 * 19;
+
+ m0 += (mul32x32_64(d9, r1 ) + mul32x32_64(d8, r2 ) + mul32x32_64(d7, r3 ) + mul32x32_64(d6, r4 * 2) + mul32x32_64(r5, r5 * 2 * 19));
+ m1 += (mul32x32_64(d9, r2 / 2) + mul32x32_64(d8, r3 ) + mul32x32_64(d7, r4 ) + mul32x32_64(d6, r5 * 2));
+ m2 += (mul32x32_64(d9, r3 ) + mul32x32_64(d8, r4 * 2) + mul32x32_64(d7, r5 * 2) + mul32x32_64(d6, r6 ));
+ m3 += (mul32x32_64(d9, r4 ) + mul32x32_64(d8, r5 * 2) + mul32x32_64(d7, r6 ));
+ m4 += (mul32x32_64(d9, r5 * 2) + mul32x32_64(d8, r6 * 2) + mul32x32_64(d7, r7 ));
+ m5 += (mul32x32_64(d9, r6 ) + mul32x32_64(d8, r7 * 2));
+ m6 += (mul32x32_64(d9, r7 * 2) + mul32x32_64(d8, r8 ));
+ m7 += (mul32x32_64(d9, r8 ));
+ m8 += (mul32x32_64(d9, r9 ));
+
+ r0 = (uint32_t)m0 & reduce_mask_26; c = (m0 >> 26);
+ m1 += c; r1 = (uint32_t)m1 & reduce_mask_25; c = (m1 >> 25);
+ m2 += c; r2 = (uint32_t)m2 & reduce_mask_26; c = (m2 >> 26);
+ m3 += c; r3 = (uint32_t)m3 & reduce_mask_25; c = (m3 >> 25);
+ m4 += c; r4 = (uint32_t)m4 & reduce_mask_26; c = (m4 >> 26);
+ m5 += c; r5 = (uint32_t)m5 & reduce_mask_25; c = (m5 >> 25);
+ m6 += c; r6 = (uint32_t)m6 & reduce_mask_26; c = (m6 >> 26);
+ m7 += c; r7 = (uint32_t)m7 & reduce_mask_25; c = (m7 >> 25);
+ m8 += c; r8 = (uint32_t)m8 & reduce_mask_26; c = (m8 >> 26);
+ m9 += c; r9 = (uint32_t)m9 & reduce_mask_25; p = (uint32_t)(m9 >> 25);
+ m0 = r0 + mul32x32_64(p,19); r0 = (uint32_t)m0 & reduce_mask_26; p = (uint32_t)(m0 >> 26);
+ r1 += p;
+
+ out[0] = r0;
+ out[1] = r1;
+ out[2] = r2;
+ out[3] = r3;
+ out[4] = r4;
+ out[5] = r5;
+ out[6] = r6;
+ out[7] = r7;
+ out[8] = r8;
+ out[9] = r9;
+}
+
+
+/* out = in ^ (2 * count) */
+static void
+curve25519_square_times(bignum25519 out, const bignum25519 in, int count) {
+ uint32_t r0,r1,r2,r3,r4,r5,r6,r7,r8,r9;
+ uint32_t d6,d7,d8,d9;
+ uint64_t m0,m1,m2,m3,m4,m5,m6,m7,m8,m9,c;
+ uint32_t p;
+
+ r0 = in[0];
+ r1 = in[1];
+ r2 = in[2];
+ r3 = in[3];
+ r4 = in[4];
+ r5 = in[5];
+ r6 = in[6];
+ r7 = in[7];
+ r8 = in[8];
+ r9 = in[9];
+
+ do {
+ m0 = mul32x32_64(r0, r0);
+ r0 *= 2;
+ m1 = mul32x32_64(r0, r1);
+ m2 = mul32x32_64(r0, r2) + mul32x32_64(r1, r1 * 2);
+ r1 *= 2;
+ m3 = mul32x32_64(r0, r3) + mul32x32_64(r1, r2 );
+ m4 = mul32x32_64(r0, r4) + mul32x32_64(r1, r3 * 2) + mul32x32_64(r2, r2);
+ r2 *= 2;
+ m5 = mul32x32_64(r0, r5) + mul32x32_64(r1, r4 ) + mul32x32_64(r2, r3);
+ m6 = mul32x32_64(r0, r6) + mul32x32_64(r1, r5 * 2) + mul32x32_64(r2, r4) + mul32x32_64(r3, r3 * 2);
+ r3 *= 2;
+ m7 = mul32x32_64(r0, r7) + mul32x32_64(r1, r6 ) + mul32x32_64(r2, r5) + mul32x32_64(r3, r4 );
+ m8 = mul32x32_64(r0, r8) + mul32x32_64(r1, r7 * 2) + mul32x32_64(r2, r6) + mul32x32_64(r3, r5 * 2) + mul32x32_64(r4, r4 );
+ m9 = mul32x32_64(r0, r9) + mul32x32_64(r1, r8 ) + mul32x32_64(r2, r7) + mul32x32_64(r3, r6 ) + mul32x32_64(r4, r5 * 2);
+
+ d6 = r6 * 19;
+ d7 = r7 * 2 * 19;
+ d8 = r8 * 19;
+ d9 = r9 * 2 * 19;
+
+ m0 += (mul32x32_64(d9, r1 ) + mul32x32_64(d8, r2 ) + mul32x32_64(d7, r3 ) + mul32x32_64(d6, r4 * 2) + mul32x32_64(r5, r5 * 2 * 19));
+ m1 += (mul32x32_64(d9, r2 / 2) + mul32x32_64(d8, r3 ) + mul32x32_64(d7, r4 ) + mul32x32_64(d6, r5 * 2));
+ m2 += (mul32x32_64(d9, r3 ) + mul32x32_64(d8, r4 * 2) + mul32x32_64(d7, r5 * 2) + mul32x32_64(d6, r6 ));
+ m3 += (mul32x32_64(d9, r4 ) + mul32x32_64(d8, r5 * 2) + mul32x32_64(d7, r6 ));
+ m4 += (mul32x32_64(d9, r5 * 2) + mul32x32_64(d8, r6 * 2) + mul32x32_64(d7, r7 ));
+ m5 += (mul32x32_64(d9, r6 ) + mul32x32_64(d8, r7 * 2));
+ m6 += (mul32x32_64(d9, r7 * 2) + mul32x32_64(d8, r8 ));
+ m7 += (mul32x32_64(d9, r8 ));
+ m8 += (mul32x32_64(d9, r9 ));
+
+ r0 = (uint32_t)m0 & reduce_mask_26; c = (m0 >> 26);
+ m1 += c; r1 = (uint32_t)m1 & reduce_mask_25; c = (m1 >> 25);
+ m2 += c; r2 = (uint32_t)m2 & reduce_mask_26; c = (m2 >> 26);
+ m3 += c; r3 = (uint32_t)m3 & reduce_mask_25; c = (m3 >> 25);
+ m4 += c; r4 = (uint32_t)m4 & reduce_mask_26; c = (m4 >> 26);
+ m5 += c; r5 = (uint32_t)m5 & reduce_mask_25; c = (m5 >> 25);
+ m6 += c; r6 = (uint32_t)m6 & reduce_mask_26; c = (m6 >> 26);
+ m7 += c; r7 = (uint32_t)m7 & reduce_mask_25; c = (m7 >> 25);
+ m8 += c; r8 = (uint32_t)m8 & reduce_mask_26; c = (m8 >> 26);
+ m9 += c; r9 = (uint32_t)m9 & reduce_mask_25; p = (uint32_t)(m9 >> 25);
+ m0 = r0 + mul32x32_64(p,19); r0 = (uint32_t)m0 & reduce_mask_26; p = (uint32_t)(m0 >> 26);
+ r1 += p;
+ } while (--count);
+
+ out[0] = r0;
+ out[1] = r1;
+ out[2] = r2;
+ out[3] = r3;
+ out[4] = r4;
+ out[5] = r5;
+ out[6] = r6;
+ out[7] = r7;
+ out[8] = r8;
+ out[9] = r9;
+}
+
+/* Take a little-endian, 32-byte number and expand it into polynomial form */
+static void
+curve25519_expand(bignum25519 out, const unsigned char in[32]) {
+ static const union { uint8_t b[2]; uint16_t s; } endian_check = {{1,0}};
+ uint32_t x0,x1,x2,x3,x4,x5,x6,x7;
+
+ if (endian_check.s == 1) {
+ x0 = *(uint32_t *)(in + 0);
+ x1 = *(uint32_t *)(in + 4);
+ x2 = *(uint32_t *)(in + 8);
+ x3 = *(uint32_t *)(in + 12);
+ x4 = *(uint32_t *)(in + 16);
+ x5 = *(uint32_t *)(in + 20);
+ x6 = *(uint32_t *)(in + 24);
+ x7 = *(uint32_t *)(in + 28);
+ } else {
+ #define F(s) \
+ ((((uint32_t)in[s + 0]) ) | \
+ (((uint32_t)in[s + 1]) << 8) | \
+ (((uint32_t)in[s + 2]) << 16) | \
+ (((uint32_t)in[s + 3]) << 24))
+ x0 = F(0);
+ x1 = F(4);
+ x2 = F(8);
+ x3 = F(12);
+ x4 = F(16);
+ x5 = F(20);
+ x6 = F(24);
+ x7 = F(28);
+ #undef F
+ }
+
+ out[0] = ( x0 ) & 0x3ffffff;
+ out[1] = ((((uint64_t)x1 << 32) | x0) >> 26) & 0x1ffffff;
+ out[2] = ((((uint64_t)x2 << 32) | x1) >> 19) & 0x3ffffff;
+ out[3] = ((((uint64_t)x3 << 32) | x2) >> 13) & 0x1ffffff;
+ out[4] = (( x3) >> 6) & 0x3ffffff;
+ out[5] = ( x4 ) & 0x1ffffff;
+ out[6] = ((((uint64_t)x5 << 32) | x4) >> 25) & 0x3ffffff;
+ out[7] = ((((uint64_t)x6 << 32) | x5) >> 19) & 0x1ffffff;
+ out[8] = ((((uint64_t)x7 << 32) | x6) >> 12) & 0x3ffffff;
+ out[9] = (( x7) >> 6) & 0x1ffffff;
+}
+
+/* Take a fully reduced polynomial form number and contract it into a
+ * little-endian, 32-byte array
+ */
+static void
+curve25519_contract(unsigned char out[32], const bignum25519 in) {
+ bignum25519 f;
+ curve25519_copy(f, in);
+
+ #define carry_pass() \
+ f[1] += f[0] >> 26; f[0] &= reduce_mask_26; \
+ f[2] += f[1] >> 25; f[1] &= reduce_mask_25; \
+ f[3] += f[2] >> 26; f[2] &= reduce_mask_26; \
+ f[4] += f[3] >> 25; f[3] &= reduce_mask_25; \
+ f[5] += f[4] >> 26; f[4] &= reduce_mask_26; \
+ f[6] += f[5] >> 25; f[5] &= reduce_mask_25; \
+ f[7] += f[6] >> 26; f[6] &= reduce_mask_26; \
+ f[8] += f[7] >> 25; f[7] &= reduce_mask_25; \
+ f[9] += f[8] >> 26; f[8] &= reduce_mask_26;
+
+ #define carry_pass_full() \
+ carry_pass() \
+ f[0] += 19 * (f[9] >> 25); f[9] &= reduce_mask_25;
+
+ #define carry_pass_final() \
+ carry_pass() \
+ f[9] &= reduce_mask_25;
+
+ carry_pass_full()
+ carry_pass_full()
+
+ /* now t is between 0 and 2^255-1, properly carried. */
+ /* case 1: between 0 and 2^255-20. case 2: between 2^255-19 and 2^255-1. */
+ f[0] += 19;
+ carry_pass_full()
+
+ /* now between 19 and 2^255-1 in both cases, and offset by 19. */
+ f[0] += (reduce_mask_26 + 1) - 19;
+ f[1] += (reduce_mask_25 + 1) - 1;
+ f[2] += (reduce_mask_26 + 1) - 1;
+ f[3] += (reduce_mask_25 + 1) - 1;
+ f[4] += (reduce_mask_26 + 1) - 1;
+ f[5] += (reduce_mask_25 + 1) - 1;
+ f[6] += (reduce_mask_26 + 1) - 1;
+ f[7] += (reduce_mask_25 + 1) - 1;
+ f[8] += (reduce_mask_26 + 1) - 1;
+ f[9] += (reduce_mask_25 + 1) - 1;
+
+ /* now between 2^255 and 2^256-20, and offset by 2^255. */
+ carry_pass_final()
+
+ #undef carry_pass
+ #undef carry_full
+ #undef carry_final
+
+ f[1] <<= 2;
+ f[2] <<= 3;
+ f[3] <<= 5;
+ f[4] <<= 6;
+ f[6] <<= 1;
+ f[7] <<= 3;
+ f[8] <<= 4;
+ f[9] <<= 6;
+
+ #define F(i, s) \
+ out[s+0] |= (unsigned char )(f[i] & 0xff); \
+ out[s+1] = (unsigned char )((f[i] >> 8) & 0xff); \
+ out[s+2] = (unsigned char )((f[i] >> 16) & 0xff); \
+ out[s+3] = (unsigned char )((f[i] >> 24) & 0xff);
+
+ out[0] = 0;
+ out[16] = 0;
+ F(0,0);
+ F(1,3);
+ F(2,6);
+ F(3,9);
+ F(4,12);
+ F(5,16);
+ F(6,19);
+ F(7,22);
+ F(8,25);
+ F(9,28);
+ #undef F
+}
+
+
+/* out = (flag) ? in : out */
+DONNA_INLINE static void
+curve25519_move_conditional_bytes(uint8_t out[96], const uint8_t in[96], uint32_t flag) {
+ const uint32_t nb = flag - 1, b = ~nb;
+ const uint32_t *inl = (const uint32_t *)in;
+ uint32_t *outl = (uint32_t *)out;
+ outl[0] = (outl[0] & nb) | (inl[0] & b);
+ outl[1] = (outl[1] & nb) | (inl[1] & b);
+ outl[2] = (outl[2] & nb) | (inl[2] & b);
+ outl[3] = (outl[3] & nb) | (inl[3] & b);
+ outl[4] = (outl[4] & nb) | (inl[4] & b);
+ outl[5] = (outl[5] & nb) | (inl[5] & b);
+ outl[6] = (outl[6] & nb) | (inl[6] & b);
+ outl[7] = (outl[7] & nb) | (inl[7] & b);
+ outl[8] = (outl[8] & nb) | (inl[8] & b);
+ outl[9] = (outl[9] & nb) | (inl[9] & b);
+ outl[10] = (outl[10] & nb) | (inl[10] & b);
+ outl[11] = (outl[11] & nb) | (inl[11] & b);
+ outl[12] = (outl[12] & nb) | (inl[12] & b);
+ outl[13] = (outl[13] & nb) | (inl[13] & b);
+ outl[14] = (outl[14] & nb) | (inl[14] & b);
+ outl[15] = (outl[15] & nb) | (inl[15] & b);
+ outl[16] = (outl[16] & nb) | (inl[16] & b);
+ outl[17] = (outl[17] & nb) | (inl[17] & b);
+ outl[18] = (outl[18] & nb) | (inl[18] & b);
+ outl[19] = (outl[19] & nb) | (inl[19] & b);
+ outl[20] = (outl[20] & nb) | (inl[20] & b);
+ outl[21] = (outl[21] & nb) | (inl[21] & b);
+ outl[22] = (outl[22] & nb) | (inl[22] & b);
+ outl[23] = (outl[23] & nb) | (inl[23] & b);
+
+}
+
+/* if (iswap) swap(a, b) */
+DONNA_INLINE static void
+curve25519_swap_conditional(bignum25519 a, bignum25519 b, uint32_t iswap) {
+ const uint32_t swap = (uint32_t)(-(int32_t)iswap);
+ uint32_t x0,x1,x2,x3,x4,x5,x6,x7,x8,x9;
+
+ x0 = swap & (a[0] ^ b[0]); a[0] ^= x0; b[0] ^= x0;
+ x1 = swap & (a[1] ^ b[1]); a[1] ^= x1; b[1] ^= x1;
+ x2 = swap & (a[2] ^ b[2]); a[2] ^= x2; b[2] ^= x2;
+ x3 = swap & (a[3] ^ b[3]); a[3] ^= x3; b[3] ^= x3;
+ x4 = swap & (a[4] ^ b[4]); a[4] ^= x4; b[4] ^= x4;
+ x5 = swap & (a[5] ^ b[5]); a[5] ^= x5; b[5] ^= x5;
+ x6 = swap & (a[6] ^ b[6]); a[6] ^= x6; b[6] ^= x6;
+ x7 = swap & (a[7] ^ b[7]); a[7] ^= x7; b[7] ^= x7;
+ x8 = swap & (a[8] ^ b[8]); a[8] ^= x8; b[8] ^= x8;
+ x9 = swap & (a[9] ^ b[9]); a[9] ^= x9; b[9] ^= x9;
+}
diff --git a/src/ext/ed25519/donna/curve25519-donna-64bit.h b/src/ext/ed25519/donna/curve25519-donna-64bit.h
new file mode 100644
index 0000000000..2941d1bcdc
--- /dev/null
+++ b/src/ext/ed25519/donna/curve25519-donna-64bit.h
@@ -0,0 +1,413 @@
+/*
+ Public domain by Adam Langley &
+ Andrew M.
+ See: https://github.com/floodyberry/curve25519-donna
+
+ 64bit integer curve25519 implementation
+*/
+
+/* A field element mod 2^255-19: 5 unsigned limbs in radix 2^51
+   (see reduce_mask_51 and the carry-by-51 passes below). */
+typedef uint64_t bignum25519[5];
+
+/* masks selecting the low 40/51/56 bits of a 64-bit word */
+static const uint64_t reduce_mask_40 = ((uint64_t)1 << 40) - 1;
+static const uint64_t reduce_mask_51 = ((uint64_t)1 << 51) - 1;
+static const uint64_t reduce_mask_56 = ((uint64_t)1 << 56) - 1;
+
+/* out = in */
+/* Copy all 5 limbs; out and in may not alias partially (plain assignment). */
+DONNA_INLINE static void
+curve25519_copy(bignum25519 out, const bignum25519 in) {
+	out[0] = in[0];
+	out[1] = in[1];
+	out[2] = in[2];
+	out[3] = in[3];
+	out[4] = in[4];
+}
+
+/* out = a + b */
+/* Limb-wise add with no carry pass: callers rely on the 13 bits of
+   headroom above the 51-bit limbs to keep sums from overflowing uint64. */
+DONNA_INLINE static void
+curve25519_add(bignum25519 out, const bignum25519 a, const bignum25519 b) {
+	out[0] = a[0] + b[0];
+	out[1] = a[1] + b[1];
+	out[2] = a[2] + b[2];
+	out[3] = a[3] + b[3];
+	out[4] = a[4] + b[4];
+}
+
+/* out = a + b, where a and/or b are the result of a basic op (add,sub) */
+/* Identical to curve25519_add in this representation: inputs one basic
+   op away from reduced still fit in 64-bit limbs without carrying. */
+DONNA_INLINE static void
+curve25519_add_after_basic(bignum25519 out, const bignum25519 a, const bignum25519 b) {
+	out[0] = a[0] + b[0];
+	out[1] = a[1] + b[1];
+	out[2] = a[2] + b[2];
+	out[3] = a[3] + b[3];
+	out[4] = a[4] + b[4];
+}
+
+/* out = a + b, then one full carry pass so every limb is < 2^51 (plus a
+   small residue in limb 0 from folding the top carry back via
+   2^255 == 19 mod p). */
+DONNA_INLINE static void
+curve25519_add_reduce(bignum25519 out, const bignum25519 a, const bignum25519 b) {
+	uint64_t c;
+	out[0] = a[0] + b[0] ; c = (out[0] >> 51); out[0] &= reduce_mask_51;
+	out[1] = a[1] + b[1] + c; c = (out[1] >> 51); out[1] &= reduce_mask_51;
+	out[2] = a[2] + b[2] + c; c = (out[2] >> 51); out[2] &= reduce_mask_51;
+	out[3] = a[3] + b[3] + c; c = (out[3] >> 51); out[3] &= reduce_mask_51;
+	out[4] = a[4] + b[4] + c; c = (out[4] >> 51); out[4] &= reduce_mask_51;
+	out[0] += c * 19;
+}
+
+/* multiples of p */
+/* 2*p and 4*p, split into limb 0 vs limbs 1-4; added to the minuend before
+   a subtraction so per-limb differences can never go negative (all limb
+   arithmetic stays in unsigned range). */
+static const uint64_t twoP0 = 0x0fffffffffffda;
+static const uint64_t twoP1234 = 0x0ffffffffffffe;
+static const uint64_t fourP0 = 0x1fffffffffffb4;
+static const uint64_t fourP1234 = 0x1ffffffffffffc;
+
+/* out = a - b */
+/* Computed as a + 2p - b so limbs never underflow; no carry pass. */
+DONNA_INLINE static void
+curve25519_sub(bignum25519 out, const bignum25519 a, const bignum25519 b) {
+	out[0] = a[0] + twoP0 - b[0];
+	out[1] = a[1] + twoP1234 - b[1];
+	out[2] = a[2] + twoP1234 - b[2];
+	out[3] = a[3] + twoP1234 - b[3];
+	out[4] = a[4] + twoP1234 - b[4];
+}
+
+/* out = a - b, where a and/or b are the result of a basic op (add,sub) */
+/* Uses 4p instead of 2p because the unreduced inputs may be up to one
+   basic op larger than a freshly reduced element. */
+DONNA_INLINE static void
+curve25519_sub_after_basic(bignum25519 out, const bignum25519 a, const bignum25519 b) {
+	out[0] = a[0] + fourP0 - b[0];
+	out[1] = a[1] + fourP1234 - b[1];
+	out[2] = a[2] + fourP1234 - b[2];
+	out[3] = a[3] + fourP1234 - b[3];
+	out[4] = a[4] + fourP1234 - b[4];
+}
+
+/* out = a - b (as a + 4p - b), followed by one full carry pass; the top
+   carry is folded back into limb 0 via 2^255 == 19 mod p. */
+DONNA_INLINE static void
+curve25519_sub_reduce(bignum25519 out, const bignum25519 a, const bignum25519 b) {
+	uint64_t c;
+	out[0] = a[0] + fourP0 - b[0] ; c = (out[0] >> 51); out[0] &= reduce_mask_51;
+	out[1] = a[1] + fourP1234 - b[1] + c; c = (out[1] >> 51); out[1] &= reduce_mask_51;
+	out[2] = a[2] + fourP1234 - b[2] + c; c = (out[2] >> 51); out[2] &= reduce_mask_51;
+	out[3] = a[3] + fourP1234 - b[3] + c; c = (out[3] >> 51); out[3] &= reduce_mask_51;
+	out[4] = a[4] + fourP1234 - b[4] + c; c = (out[4] >> 51); out[4] &= reduce_mask_51;
+	out[0] += c * 19;
+}
+
+/* out = -a */
+/* Computed as 2p - a with a carry pass, so the result is non-negative
+   and limb-reduced. */
+DONNA_INLINE static void
+curve25519_neg(bignum25519 out, const bignum25519 a) {
+	uint64_t c;
+	out[0] = twoP0 - a[0] ; c = (out[0] >> 51); out[0] &= reduce_mask_51;
+	out[1] = twoP1234 - a[1] + c; c = (out[1] >> 51); out[1] &= reduce_mask_51;
+	out[2] = twoP1234 - a[2] + c; c = (out[2] >> 51); out[2] &= reduce_mask_51;
+	out[3] = twoP1234 - a[3] + c; c = (out[3] >> 51); out[3] &= reduce_mask_51;
+	out[4] = twoP1234 - a[4] + c; c = (out[4] >> 51); out[4] &= reduce_mask_51;
+	out[0] += c * 19;
+}
+
+/* out = a * b */
+/* Schoolbook 5x5 limb multiply into 128-bit accumulators t[0..4].
+   Cross terms whose limb indices sum to >= 5 are pre-scaled by 19
+   (2^255 == 19 mod p) and folded into the low accumulators; a single
+   carry chain then brings each result limb back under ~2^51.
+   Uses native unsigned __int128 when available, otherwise the
+   mul64x64_128/add128/shr128 macro emulation. */
+DONNA_INLINE static void
+curve25519_mul(bignum25519 out, const bignum25519 in2, const bignum25519 in) {
+#if !defined(HAVE_NATIVE_UINT128)
+	uint128_t mul;
+#endif
+	uint128_t t[5];
+	uint64_t r0,r1,r2,r3,r4,s0,s1,s2,s3,s4,c;
+
+	r0 = in[0];
+	r1 = in[1];
+	r2 = in[2];
+	r3 = in[3];
+	r4 = in[4];
+
+	s0 = in2[0];
+	s1 = in2[1];
+	s2 = in2[2];
+	s3 = in2[3];
+	s4 = in2[4];
+
+#if defined(HAVE_NATIVE_UINT128)
+	t[0] = ((uint128_t) r0) * s0;
+	t[1] = ((uint128_t) r0) * s1 + ((uint128_t) r1) * s0;
+	t[2] = ((uint128_t) r0) * s2 + ((uint128_t) r2) * s0 + ((uint128_t) r1) * s1;
+	t[3] = ((uint128_t) r0) * s3 + ((uint128_t) r3) * s0 + ((uint128_t) r1) * s2 + ((uint128_t) r2) * s1;
+	t[4] = ((uint128_t) r0) * s4 + ((uint128_t) r4) * s0 + ((uint128_t) r3) * s1 + ((uint128_t) r1) * s3 + ((uint128_t) r2) * s2;
+#else
+	mul64x64_128(t[0], r0, s0)
+	mul64x64_128(t[1], r0, s1) mul64x64_128(mul, r1, s0) add128(t[1], mul)
+	mul64x64_128(t[2], r0, s2) mul64x64_128(mul, r2, s0) add128(t[2], mul) mul64x64_128(mul, r1, s1) add128(t[2], mul)
+	mul64x64_128(t[3], r0, s3) mul64x64_128(mul, r3, s0) add128(t[3], mul) mul64x64_128(mul, r1, s2) add128(t[3], mul) mul64x64_128(mul, r2, s1) add128(t[3], mul)
+	mul64x64_128(t[4], r0, s4) mul64x64_128(mul, r4, s0) add128(t[4], mul) mul64x64_128(mul, r3, s1) add128(t[4], mul) mul64x64_128(mul, r1, s3) add128(t[4], mul) mul64x64_128(mul, r2, s2) add128(t[4], mul)
+#endif
+
+	/* fold the high half of the product: terms with limb-index sum >= 5
+	   re-enter at (sum - 5) scaled by 19 */
+	r1 *= 19;
+	r2 *= 19;
+	r3 *= 19;
+	r4 *= 19;
+
+#if defined(HAVE_NATIVE_UINT128)
+	t[0] += ((uint128_t) r4) * s1 + ((uint128_t) r1) * s4 + ((uint128_t) r2) * s3 + ((uint128_t) r3) * s2;
+	t[1] += ((uint128_t) r4) * s2 + ((uint128_t) r2) * s4 + ((uint128_t) r3) * s3;
+	t[2] += ((uint128_t) r4) * s3 + ((uint128_t) r3) * s4;
+	t[3] += ((uint128_t) r4) * s4;
+#else
+	mul64x64_128(mul, r4, s1) add128(t[0], mul) mul64x64_128(mul, r1, s4) add128(t[0], mul) mul64x64_128(mul, r2, s3) add128(t[0], mul) mul64x64_128(mul, r3, s2) add128(t[0], mul)
+	mul64x64_128(mul, r4, s2) add128(t[1], mul) mul64x64_128(mul, r2, s4) add128(t[1], mul) mul64x64_128(mul, r3, s3) add128(t[1], mul)
+	mul64x64_128(mul, r4, s3) add128(t[2], mul) mul64x64_128(mul, r3, s4) add128(t[2], mul)
+	mul64x64_128(mul, r4, s4) add128(t[3], mul)
+#endif
+
+
+	/* carry chain: keep 51 bits per limb, push the rest upward, and wrap
+	   the final carry into limb 0 (times 19) */
+	r0 = lo128(t[0]) & reduce_mask_51; shr128(c, t[0], 51);
+	add128_64(t[1], c) r1 = lo128(t[1]) & reduce_mask_51; shr128(c, t[1], 51);
+	add128_64(t[2], c) r2 = lo128(t[2]) & reduce_mask_51; shr128(c, t[2], 51);
+	add128_64(t[3], c) r3 = lo128(t[3]) & reduce_mask_51; shr128(c, t[3], 51);
+	add128_64(t[4], c) r4 = lo128(t[4]) & reduce_mask_51; shr128(c, t[4], 51);
+	r0 += c * 19; c = r0 >> 51; r0 = r0 & reduce_mask_51;
+	r1 += c;
+
+	out[0] = r0;
+	out[1] = r1;
+	out[2] = r2;
+	out[3] = r3;
+	out[4] = r4;
+}
+
+/* Out-of-line wrapper around curve25519_mul — presumably to avoid code
+   bloat at the many call sites in the inversion ladders (see the helpers
+   header); behavior is identical. */
+DONNA_NOINLINE static void
+curve25519_mul_noinline(bignum25519 out, const bignum25519 in2, const bignum25519 in) {
+	curve25519_mul(out, in2, in);
+}
+
+/* out = in^(2 * count) */
+/* Repeatedly squares `in` in place in registers, `count` times (count
+   must be >= 1: the loop is do/while). The squaring exploits symmetry
+   (d0/d1/d2 are doubled limbs, d419/d4 carry the *19 wrap factor), and
+   the carry step uses shl128(..., 13) i.e. a shift down by 51 of the
+   128-bit accumulator's high 64 bits. */
+DONNA_NOINLINE static void
+curve25519_square_times(bignum25519 out, const bignum25519 in, uint64_t count) {
+#if !defined(HAVE_NATIVE_UINT128)
+	uint128_t mul;
+#endif
+	uint128_t t[5];
+	uint64_t r0,r1,r2,r3,r4,c;
+	uint64_t d0,d1,d2,d4,d419;
+
+	r0 = in[0];
+	r1 = in[1];
+	r2 = in[2];
+	r3 = in[3];
+	r4 = in[4];
+
+	do {
+		d0 = r0 * 2;
+		d1 = r1 * 2;
+		d2 = r2 * 2 * 19;
+		d419 = r4 * 19;
+		d4 = d419 * 2;
+
+#if defined(HAVE_NATIVE_UINT128)
+		t[0] = ((uint128_t) r0) * r0 + ((uint128_t) d4) * r1 + (((uint128_t) d2) * (r3 ));
+		t[1] = ((uint128_t) d0) * r1 + ((uint128_t) d4) * r2 + (((uint128_t) r3) * (r3 * 19));
+		t[2] = ((uint128_t) d0) * r2 + ((uint128_t) r1) * r1 + (((uint128_t) d4) * (r3 ));
+		t[3] = ((uint128_t) d0) * r3 + ((uint128_t) d1) * r2 + (((uint128_t) r4) * (d419 ));
+		t[4] = ((uint128_t) d0) * r4 + ((uint128_t) d1) * r3 + (((uint128_t) r2) * (r2 ));
+#else
+		mul64x64_128(t[0], r0, r0) mul64x64_128(mul, d4, r1) add128(t[0], mul) mul64x64_128(mul, d2, r3) add128(t[0], mul)
+		mul64x64_128(t[1], d0, r1) mul64x64_128(mul, d4, r2) add128(t[1], mul) mul64x64_128(mul, r3, r3 * 19) add128(t[1], mul)
+		mul64x64_128(t[2], d0, r2) mul64x64_128(mul, r1, r1) add128(t[2], mul) mul64x64_128(mul, d4, r3) add128(t[2], mul)
+		mul64x64_128(t[3], d0, r3) mul64x64_128(mul, d1, r2) add128(t[3], mul) mul64x64_128(mul, r4, d419) add128(t[3], mul)
+		mul64x64_128(t[4], d0, r4) mul64x64_128(mul, d1, r3) add128(t[4], mul) mul64x64_128(mul, r2, r2) add128(t[4], mul)
+#endif
+
+		r0 = lo128(t[0]) & reduce_mask_51;
+		r1 = lo128(t[1]) & reduce_mask_51; shl128(c, t[0], 13); r1 += c;
+		r2 = lo128(t[2]) & reduce_mask_51; shl128(c, t[1], 13); r2 += c;
+		r3 = lo128(t[3]) & reduce_mask_51; shl128(c, t[2], 13); r3 += c;
+		r4 = lo128(t[4]) & reduce_mask_51; shl128(c, t[3], 13); r4 += c;
+		shl128(c, t[4], 13); r0 += c * 19;
+		c = r0 >> 51; r0 &= reduce_mask_51;
+		r1 += c ; c = r1 >> 51; r1 &= reduce_mask_51;
+		r2 += c ; c = r2 >> 51; r2 &= reduce_mask_51;
+		r3 += c ; c = r3 >> 51; r3 &= reduce_mask_51;
+		r4 += c ; c = r4 >> 51; r4 &= reduce_mask_51;
+		r0 += c * 19;
+	} while(--count);
+
+	out[0] = r0;
+	out[1] = r1;
+	out[2] = r2;
+	out[3] = r3;
+	out[4] = r4;
+}
+
+/* out = in^2 — single squaring; same symmetric-term scheme as
+   curve25519_square_times but with the carry chain of curve25519_mul. */
+DONNA_INLINE static void
+curve25519_square(bignum25519 out, const bignum25519 in) {
+#if !defined(HAVE_NATIVE_UINT128)
+	uint128_t mul;
+#endif
+	uint128_t t[5];
+	uint64_t r0,r1,r2,r3,r4,c;
+	uint64_t d0,d1,d2,d4,d419;
+
+	r0 = in[0];
+	r1 = in[1];
+	r2 = in[2];
+	r3 = in[3];
+	r4 = in[4];
+
+	d0 = r0 * 2;
+	d1 = r1 * 2;
+	d2 = r2 * 2 * 19;
+	d419 = r4 * 19;
+	d4 = d419 * 2;
+
+#if defined(HAVE_NATIVE_UINT128)
+	t[0] = ((uint128_t) r0) * r0 + ((uint128_t) d4) * r1 + (((uint128_t) d2) * (r3 ));
+	t[1] = ((uint128_t) d0) * r1 + ((uint128_t) d4) * r2 + (((uint128_t) r3) * (r3 * 19));
+	t[2] = ((uint128_t) d0) * r2 + ((uint128_t) r1) * r1 + (((uint128_t) d4) * (r3 ));
+	t[3] = ((uint128_t) d0) * r3 + ((uint128_t) d1) * r2 + (((uint128_t) r4) * (d419 ));
+	t[4] = ((uint128_t) d0) * r4 + ((uint128_t) d1) * r3 + (((uint128_t) r2) * (r2 ));
+#else
+	mul64x64_128(t[0], r0, r0) mul64x64_128(mul, d4, r1) add128(t[0], mul) mul64x64_128(mul, d2, r3) add128(t[0], mul)
+	mul64x64_128(t[1], d0, r1) mul64x64_128(mul, d4, r2) add128(t[1], mul) mul64x64_128(mul, r3, r3 * 19) add128(t[1], mul)
+	mul64x64_128(t[2], d0, r2) mul64x64_128(mul, r1, r1) add128(t[2], mul) mul64x64_128(mul, d4, r3) add128(t[2], mul)
+	mul64x64_128(t[3], d0, r3) mul64x64_128(mul, d1, r2) add128(t[3], mul) mul64x64_128(mul, r4, d419) add128(t[3], mul)
+	mul64x64_128(t[4], d0, r4) mul64x64_128(mul, d1, r3) add128(t[4], mul) mul64x64_128(mul, r2, r2) add128(t[4], mul)
+#endif
+
+	/* carry each 128-bit accumulator down to a 51-bit limb */
+	r0 = lo128(t[0]) & reduce_mask_51; shr128(c, t[0], 51);
+	add128_64(t[1], c) r1 = lo128(t[1]) & reduce_mask_51; shr128(c, t[1], 51);
+	add128_64(t[2], c) r2 = lo128(t[2]) & reduce_mask_51; shr128(c, t[2], 51);
+	add128_64(t[3], c) r3 = lo128(t[3]) & reduce_mask_51; shr128(c, t[3], 51);
+	add128_64(t[4], c) r4 = lo128(t[4]) & reduce_mask_51; shr128(c, t[4], 51);
+	r0 += c * 19; c = r0 >> 51; r0 = r0 & reduce_mask_51;
+	r1 += c;
+
+	out[0] = r0;
+	out[1] = r1;
+	out[2] = r2;
+	out[3] = r3;
+	out[4] = r4;
+}
+
+/* Take a little-endian, 32-byte number and expand it into polynomial form */
+/* Loads four 64-bit words (fast path on little-endian hosts, byte-by-byte
+   otherwise — the endianness is detected at runtime via endian_check) and
+   re-splits them into 5 limbs of 51 bits.
+   NOTE(review): the little-endian fast path dereferences `in` through a
+   uint64_t*, which is a type-punned and possibly unaligned load —
+   technically UB under strict aliasing; presumably tolerated on the
+   targeted compilers. Verify if porting to a stricter toolchain. */
+DONNA_INLINE static void
+curve25519_expand(bignum25519 out, const unsigned char *in) {
+	static const union { uint8_t b[2]; uint16_t s; } endian_check = {{1,0}};
+	uint64_t x0,x1,x2,x3;
+
+	if (endian_check.s == 1) {
+		x0 = *(uint64_t *)(in + 0);
+		x1 = *(uint64_t *)(in + 8);
+		x2 = *(uint64_t *)(in + 16);
+		x3 = *(uint64_t *)(in + 24);
+	} else {
+		#define F(s) \
+			((((uint64_t)in[s + 0]) ) | \
+			(((uint64_t)in[s + 1]) << 8) | \
+			(((uint64_t)in[s + 2]) << 16) | \
+			(((uint64_t)in[s + 3]) << 24) | \
+			(((uint64_t)in[s + 4]) << 32) | \
+			(((uint64_t)in[s + 5]) << 40) | \
+			(((uint64_t)in[s + 6]) << 48) | \
+			(((uint64_t)in[s + 7]) << 56))
+
+		x0 = F(0);
+		x1 = F(8);
+		x2 = F(16);
+		x3 = F(24);
+	}
+
+	/* slice the 256 loaded bits into 51-bit limbs */
+	out[0] = x0 & reduce_mask_51; x0 = (x0 >> 51) | (x1 << 13);
+	out[1] = x0 & reduce_mask_51; x1 = (x1 >> 38) | (x2 << 26);
+	out[2] = x1 & reduce_mask_51; x2 = (x2 >> 25) | (x3 << 39);
+	out[3] = x2 & reduce_mask_51; x3 = (x3 >> 12);
+	out[4] = x3 & reduce_mask_51;
+}
+
+/* Take a fully reduced polynomial form number and contract it into a
+ * little-endian, 32-byte array
+ */
+/* Canonicalizes before serializing, in constant time: two full carry
+   passes, then the "+19 / add 2^51-sized offsets" trick selects the
+   unique representative in [0, p) without a data-dependent branch.
+   Finally the 5x51-bit limbs are packed into 4 64-bit words and written
+   out byte by byte (little-endian). */
+DONNA_INLINE static void
+curve25519_contract(unsigned char *out, const bignum25519 input) {
+	uint64_t t[5];
+	uint64_t f, i;
+
+	t[0] = input[0];
+	t[1] = input[1];
+	t[2] = input[2];
+	t[3] = input[3];
+	t[4] = input[4];
+
+	#define curve25519_contract_carry() \
+		t[1] += t[0] >> 51; t[0] &= reduce_mask_51; \
+		t[2] += t[1] >> 51; t[1] &= reduce_mask_51; \
+		t[3] += t[2] >> 51; t[2] &= reduce_mask_51; \
+		t[4] += t[3] >> 51; t[3] &= reduce_mask_51;
+
+	#define curve25519_contract_carry_full() curve25519_contract_carry() \
+		t[0] += 19 * (t[4] >> 51); t[4] &= reduce_mask_51;
+
+	#define curve25519_contract_carry_final() curve25519_contract_carry() \
+		t[4] &= reduce_mask_51;
+
+	curve25519_contract_carry_full()
+	curve25519_contract_carry_full()
+
+	/* now t is between 0 and 2^255-1, properly carried. */
+	/* case 1: between 0 and 2^255-20. case 2: between 2^255-19 and 2^255-1. */
+	t[0] += 19;
+	curve25519_contract_carry_full()
+
+	/* now between 19 and 2^255-1 in both cases, and offset by 19. */
+	t[0] += (reduce_mask_51 + 1) - 19;
+	t[1] += (reduce_mask_51 + 1) - 1;
+	t[2] += (reduce_mask_51 + 1) - 1;
+	t[3] += (reduce_mask_51 + 1) - 1;
+	t[4] += (reduce_mask_51 + 1) - 1;
+
+	/* now between 2^255 and 2^256-20, and offset by 2^255. */
+	curve25519_contract_carry_final()
+
+	/* pack limbs n,n+1 into a 64-bit word and emit it little-endian */
+	#define write51full(n,shift) \
+		f = ((t[n] >> shift) | (t[n+1] << (51 - shift))); \
+		for (i = 0; i < 8; i++, f >>= 8) *out++ = (unsigned char)f;
+	#define write51(n) write51full(n,13*n)
+	write51(0)
+	write51(1)
+	write51(2)
+	write51(3)
+}
+
+#if !defined(ED25519_GCC_64BIT_CHOOSE)
+
+/* out = (flag) ? in : out */
+/* Constant-time 96-byte conditional move, processed as twelve 64-bit
+   words masked by flag (flag must be 0 or 1; nb is all-ones when flag
+   is 0). Used for table lookups without secret-dependent addressing.
+   NOTE(review): the uint8_t* -> uint64_t* casts assume both buffers are
+   8-byte aligned and rely on lax aliasing — confirm callers only pass
+   aligned table entries. */
+DONNA_INLINE static void
+curve25519_move_conditional_bytes(uint8_t out[96], const uint8_t in[96], uint64_t flag) {
+	const uint64_t nb = flag - 1, b = ~nb;
+	const uint64_t *inq = (const uint64_t *)in;
+	uint64_t *outq = (uint64_t *)out;
+	outq[0] = (outq[0] & nb) | (inq[0] & b);
+	outq[1] = (outq[1] & nb) | (inq[1] & b);
+	outq[2] = (outq[2] & nb) | (inq[2] & b);
+	outq[3] = (outq[3] & nb) | (inq[3] & b);
+	outq[4] = (outq[4] & nb) | (inq[4] & b);
+	outq[5] = (outq[5] & nb) | (inq[5] & b);
+	outq[6] = (outq[6] & nb) | (inq[6] & b);
+	outq[7] = (outq[7] & nb) | (inq[7] & b);
+	outq[8] = (outq[8] & nb) | (inq[8] & b);
+	outq[9] = (outq[9] & nb) | (inq[9] & b);
+	outq[10] = (outq[10] & nb) | (inq[10] & b);
+	outq[11] = (outq[11] & nb) | (inq[11] & b);
+}
+
+/* if (iswap) swap(a, b) */
+/* Constant-time conditional swap of two 5-limb field elements; iswap
+   (0 or 1) becomes an all-zeros/all-ones XOR mask, so there is no
+   secret-dependent branch. */
+DONNA_INLINE static void
+curve25519_swap_conditional(bignum25519 a, bignum25519 b, uint64_t iswap) {
+	const uint64_t swap = (uint64_t)(-(int64_t)iswap);
+	uint64_t x0,x1,x2,x3,x4;
+
+	x0 = swap & (a[0] ^ b[0]); a[0] ^= x0; b[0] ^= x0;
+	x1 = swap & (a[1] ^ b[1]); a[1] ^= x1; b[1] ^= x1;
+	x2 = swap & (a[2] ^ b[2]); a[2] ^= x2; b[2] ^= x2;
+	x3 = swap & (a[3] ^ b[3]); a[3] ^= x3; b[3] ^= x3;
+	x4 = swap & (a[4] ^ b[4]); a[4] ^= x4; b[4] ^= x4;
+}
+
+#endif /* ED25519_GCC_64BIT_CHOOSE */
+
+#define ED25519_64BIT_TABLES
+
diff --git a/src/ext/ed25519/donna/curve25519-donna-helpers.h b/src/ext/ed25519/donna/curve25519-donna-helpers.h
new file mode 100644
index 0000000000..e4058ff5ec
--- /dev/null
+++ b/src/ext/ed25519/donna/curve25519-donna-helpers.h
@@ -0,0 +1,67 @@
+/*
+ Public domain by Andrew M.
+ See: https://github.com/floodyberry/curve25519-donna
+
+ Curve25519 implementation agnostic helpers
+*/
+
+/*
+ * In: b = 2^5 - 2^0
+ * Out: b = 2^250 - 2^0
+ */
+/* Fixed square-and-multiply ladder shared by the two exponentiations
+   below; exponents in the margin comments are of the implicit base z,
+   i.e. "2^10 - 2^5" means z^(2^10 - 2^5). b is both input and output. */
+static void
+curve25519_pow_two5mtwo0_two250mtwo0(bignum25519 b) {
+	bignum25519 ALIGN(16) t0,c;
+
+	/* 2^5 - 2^0 */ /* b */
+	/* 2^10 - 2^5 */ curve25519_square_times(t0, b, 5);
+	/* 2^10 - 2^0 */ curve25519_mul_noinline(b, t0, b);
+	/* 2^20 - 2^10 */ curve25519_square_times(t0, b, 10);
+	/* 2^20 - 2^0 */ curve25519_mul_noinline(c, t0, b);
+	/* 2^40 - 2^20 */ curve25519_square_times(t0, c, 20);
+	/* 2^40 - 2^0 */ curve25519_mul_noinline(t0, t0, c);
+	/* 2^50 - 2^10 */ curve25519_square_times(t0, t0, 10);
+	/* 2^50 - 2^0 */ curve25519_mul_noinline(b, t0, b);
+	/* 2^100 - 2^50 */ curve25519_square_times(t0, b, 50);
+	/* 2^100 - 2^0 */ curve25519_mul_noinline(c, t0, b);
+	/* 2^200 - 2^100 */ curve25519_square_times(t0, c, 100);
+	/* 2^200 - 2^0 */ curve25519_mul_noinline(t0, t0, c);
+	/* 2^250 - 2^50 */ curve25519_square_times(t0, t0, 50);
+	/* 2^250 - 2^0 */ curve25519_mul_noinline(b, t0, b);
+}
+
+/*
+ * z^(p - 2) = z^(2^255 - 21)
+ */
+/* Field inversion by Fermat's little theorem: out = z^(p-2) = 1/z mod p.
+   Constant sequence of squarings/multiplies (no secret-dependent flow). */
+static void
+curve25519_recip(bignum25519 out, const bignum25519 z) {
+	bignum25519 ALIGN(16) a,t0,b;
+
+	/* 2 */ curve25519_square_times(a, z, 1); /* a = 2 */
+	/* 8 */ curve25519_square_times(t0, a, 2);
+	/* 9 */ curve25519_mul_noinline(b, t0, z); /* b = 9 */
+	/* 11 */ curve25519_mul_noinline(a, b, a); /* a = 11 */
+	/* 22 */ curve25519_square_times(t0, a, 1);
+	/* 2^5 - 2^0 = 31 */ curve25519_mul_noinline(b, t0, b);
+	/* 2^250 - 2^0 */ curve25519_pow_two5mtwo0_two250mtwo0(b);
+	/* 2^255 - 2^5 */ curve25519_square_times(b, b, 5);
+	/* 2^255 - 21 */ curve25519_mul_noinline(out, b, a);
+}
+
+/*
+ * z^((p-5)/8) = z^(2^252 - 3)
+ */
+/* Exponentiation used for square-root extraction; same ladder as
+   curve25519_recip but with a different tail. */
+static void
+curve25519_pow_two252m3(bignum25519 two252m3, const bignum25519 z) {
+	bignum25519 ALIGN(16) b,c,t0;
+
+	/* 2 */ curve25519_square_times(c, z, 1); /* c = 2 */
+	/* 8 */ curve25519_square_times(t0, c, 2); /* t0 = 8 */
+	/* 9 */ curve25519_mul_noinline(b, t0, z); /* b = 9 */
+	/* 11 */ curve25519_mul_noinline(c, b, c); /* c = 11 */
+	/* 22 */ curve25519_square_times(t0, c, 1);
+	/* 2^5 - 2^0 = 31 */ curve25519_mul_noinline(b, t0, b);
+	/* 2^250 - 2^0 */ curve25519_pow_two5mtwo0_two250mtwo0(b);
+	/* 2^252 - 2^2 */ curve25519_square_times(b, b, 2);
+	/* 2^252 - 3 */ curve25519_mul_noinline(two252m3, b, z);
+}
diff --git a/src/ext/ed25519/donna/curve25519-donna-sse2.h b/src/ext/ed25519/donna/curve25519-donna-sse2.h
new file mode 100644
index 0000000000..1dbfd44d8b
--- /dev/null
+++ b/src/ext/ed25519/donna/curve25519-donna-sse2.h
@@ -0,0 +1,1112 @@
+/*
+ Public domain by Andrew M.
+ See: https://github.com/floodyberry/curve25519-donna
+
+ SSE2 curve25519 implementation
+*/
+
+#include <emmintrin.h>
+typedef __m128i xmmi;
+
+/* 16 bytes viewable either as scalar lanes or as one SSE register */
+typedef union packedelem8_t {
+	unsigned char u[16];
+	xmmi v;
+} packedelem8;
+
+typedef union packedelem32_t {
+	uint32_t u[4];
+	xmmi v;
+} packedelem32;
+
+typedef union packedelem64_t {
+	uint64_t u[2];
+	xmmi v;
+} packedelem64;
+
+/* 10 elements + an extra 2 to fit in 3 xmm registers */
+typedef uint32_t bignum25519[12];
+typedef packedelem32 packed32bignum25519[5];
+typedef packedelem64 packed64bignum25519[10];
+
+static const packedelem32 bot32bitmask = {{0xffffffff, 0x00000000, 0xffffffff, 0x00000000}};
+static const packedelem32 top32bitmask = {{0x00000000, 0xffffffff, 0x00000000, 0xffffffff}};
+static const packedelem32 top64bitmask = {{0x00000000, 0x00000000, 0xffffffff, 0xffffffff}};
+static const packedelem32 bot64bitmask = {{0xffffffff, 0xffffffff, 0x00000000, 0x00000000}};
+
+/* reduction masks */
+static const packedelem64 packedmask26 = {{0x03ffffff, 0x03ffffff}};
+static const packedelem64 packedmask25 = {{0x01ffffff, 0x01ffffff}};
+static const packedelem32 packedmask2625 = {{0x3ffffff,0,0x1ffffff,0}};
+static const packedelem32 packedmask26262626 = {{0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff}};
+static const packedelem32 packedmask25252525 = {{0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff}};
+
+/* multipliers */
+static const packedelem64 packednineteen = {{19, 19}};
+static const packedelem64 packednineteenone = {{19, 1}};
+static const packedelem64 packedthirtyeight = {{38, 38}};
+static const packedelem64 packed3819 = {{19*2,19}};
+static const packedelem64 packed9638 = {{19*4,19*2}};
+
+/* 121666,121665 */
+static const packedelem64 packed121666121665 = {{121666, 121665}};
+
+/* 2*(2^255 - 19) = 0 mod p */
+static const packedelem32 packed2p0 = {{0x7ffffda,0x3fffffe,0x7fffffe,0x3fffffe}};
+static const packedelem32 packed2p1 = {{0x7fffffe,0x3fffffe,0x7fffffe,0x3fffffe}};
+static const packedelem32 packed2p2 = {{0x7fffffe,0x3fffffe,0x0000000,0x0000000}};
+
+static const packedelem32 packed32packed2p0 = {{0x7ffffda,0x7ffffda,0x3fffffe,0x3fffffe}};
+static const packedelem32 packed32packed2p1 = {{0x7fffffe,0x7fffffe,0x3fffffe,0x3fffffe}};
+
+/* 4*(2^255 - 19) = 0 mod p */
+static const packedelem32 packed4p0 = {{0xfffffb4,0x7fffffc,0xffffffc,0x7fffffc}};
+static const packedelem32 packed4p1 = {{0xffffffc,0x7fffffc,0xffffffc,0x7fffffc}};
+static const packedelem32 packed4p2 = {{0xffffffc,0x7fffffc,0x0000000,0x0000000}};
+
+static const packedelem32 packed32packed4p0 = {{0xfffffb4,0xfffffb4,0x7fffffc,0x7fffffc}};
+static const packedelem32 packed32packed4p1 = {{0xffffffc,0xffffffc,0x7fffffc,0x7fffffc}};
+
+/* out = in */
+/* Copies the 12-limb element as three 128-bit vectors; _mm_load/_mm_store
+   _si128 are aligned accesses, so both operands must be 16-byte aligned. */
+DONNA_INLINE static void
+curve25519_copy(bignum25519 out, const bignum25519 in) {
+	xmmi x0,x1,x2;
+	x0 = _mm_load_si128((xmmi*)in + 0);
+	x1 = _mm_load_si128((xmmi*)in + 1);
+	x2 = _mm_load_si128((xmmi*)in + 2);
+	_mm_store_si128((xmmi*)out + 0, x0);
+	_mm_store_si128((xmmi*)out + 1, x1);
+	_mm_store_si128((xmmi*)out + 2, x2);
+}
+
+/* out = a + b */
+/* Lane-wise 32-bit add with no carry pass; relies on headroom above the
+   26/25-bit limbs, as in the scalar implementations. */
+DONNA_INLINE static void
+curve25519_add(bignum25519 out, const bignum25519 a, const bignum25519 b) {
+	xmmi a0,a1,a2,b0,b1,b2;
+	a0 = _mm_load_si128((xmmi*)a + 0);
+	a1 = _mm_load_si128((xmmi*)a + 1);
+	a2 = _mm_load_si128((xmmi*)a + 2);
+	b0 = _mm_load_si128((xmmi*)b + 0);
+	b1 = _mm_load_si128((xmmi*)b + 1);
+	b2 = _mm_load_si128((xmmi*)b + 2);
+	a0 = _mm_add_epi32(a0, b0);
+	a1 = _mm_add_epi32(a1, b1);
+	a2 = _mm_add_epi32(a2, b2);
+	_mm_store_si128((xmmi*)out + 0, a0);
+	_mm_store_si128((xmmi*)out + 1, a1);
+	_mm_store_si128((xmmi*)out + 2, a2);
+}
+
+#define curve25519_add_after_basic curve25519_add_reduce
+/* out = a + b with a full carry pass. The 12 packed limbs are first
+   deinterleaved into even/odd 64-bit lanes (r0..r5), carried with the
+   alternating 26/25-bit limb widths (packedmask26/packedmask25), the top
+   carry folded back times 19, and then re-interleaved and stored. */
+DONNA_INLINE static void
+curve25519_add_reduce(bignum25519 out, const bignum25519 a, const bignum25519 b) {
+	xmmi a0,a1,a2,b0,b1,b2;
+	xmmi c1,c2,c3;
+	xmmi r0,r1,r2,r3,r4,r5;
+
+	a0 = _mm_load_si128((xmmi*)a + 0);
+	a1 = _mm_load_si128((xmmi*)a + 1);
+	a2 = _mm_load_si128((xmmi*)a + 2);
+	b0 = _mm_load_si128((xmmi*)b + 0);
+	b1 = _mm_load_si128((xmmi*)b + 1);
+	b2 = _mm_load_si128((xmmi*)b + 2);
+	a0 = _mm_add_epi32(a0, b0);
+	a1 = _mm_add_epi32(a1, b1);
+	a2 = _mm_add_epi32(a2, b2);
+
+	r0 = _mm_and_si128(_mm_unpacklo_epi64(a0, a1), bot32bitmask.v);
+	r1 = _mm_srli_epi64(_mm_unpacklo_epi64(a0, a1), 32);
+	r2 = _mm_and_si128(_mm_unpackhi_epi64(a0, a1), bot32bitmask.v);
+	r3 = _mm_srli_epi64(_mm_unpackhi_epi64(a0, a1), 32);
+	r4 = _mm_and_si128(_mm_unpacklo_epi64(_mm_setzero_si128(), a2), bot32bitmask.v);
+	r5 = _mm_srli_epi64(_mm_unpacklo_epi64(_mm_setzero_si128(), a2), 32);
+
+	c1 = _mm_srli_epi64(r0, 26); c2 = _mm_srli_epi64(r2, 26); r0 = _mm_and_si128(r0, packedmask26.v); r2 = _mm_and_si128(r2, packedmask26.v); r1 = _mm_add_epi64(r1, c1); r3 = _mm_add_epi64(r3, c2);
+	c1 = _mm_srli_epi64(r1, 25); c2 = _mm_srli_epi64(r3, 25); r1 = _mm_and_si128(r1, packedmask25.v); r3 = _mm_and_si128(r3, packedmask25.v); r2 = _mm_add_epi64(r2, c1); r4 = _mm_add_epi64(r4, c2); c3 = _mm_slli_si128(c2, 8);
+	c1 = _mm_srli_epi64(r4, 26); r4 = _mm_and_si128(r4, packedmask26.v); r5 = _mm_add_epi64(r5, c1);
+	c1 = _mm_srli_epi64(r5, 25); r5 = _mm_and_si128(r5, packedmask25.v); r0 = _mm_add_epi64(r0, _mm_unpackhi_epi64(_mm_mul_epu32(c1, packednineteen.v), c3));
+	c1 = _mm_srli_epi64(r0, 26); c2 = _mm_srli_epi64(r2, 26); r0 = _mm_and_si128(r0, packedmask26.v); r2 = _mm_and_si128(r2, packedmask26.v); r1 = _mm_add_epi64(r1, c1); r3 = _mm_add_epi64(r3, c2);
+
+	_mm_store_si128((xmmi*)out + 0, _mm_unpacklo_epi64(_mm_unpacklo_epi32(r0, r1), _mm_unpacklo_epi32(r2, r3)));
+	_mm_store_si128((xmmi*)out + 1, _mm_unpacklo_epi64(_mm_unpackhi_epi32(r0, r1), _mm_unpackhi_epi32(r2, r3)));
+	_mm_store_si128((xmmi*)out + 2, _mm_unpackhi_epi32(r4, r5));
+}
+
+/* out = a - b, computed as (a + 2p) - b so lanes never underflow; only a
+   partial carry on the first vector (limbs 0/1 via packedmask26/25),
+   with the spill-over added into the second vector. */
+DONNA_INLINE static void
+curve25519_sub(bignum25519 out, const bignum25519 a, const bignum25519 b) {
+	xmmi a0,a1,a2,b0,b1,b2;
+	xmmi c1,c2;
+	xmmi r0,r1;
+
+	a0 = _mm_load_si128((xmmi*)a + 0);
+	a1 = _mm_load_si128((xmmi*)a + 1);
+	a2 = _mm_load_si128((xmmi*)a + 2);
+	a0 = _mm_add_epi32(a0, packed2p0.v);
+	a1 = _mm_add_epi32(a1, packed2p1.v);
+	a2 = _mm_add_epi32(a2, packed2p2.v);
+	b0 = _mm_load_si128((xmmi*)b + 0);
+	b1 = _mm_load_si128((xmmi*)b + 1);
+	b2 = _mm_load_si128((xmmi*)b + 2);
+	a0 = _mm_sub_epi32(a0, b0);
+	a1 = _mm_sub_epi32(a1, b1);
+	a2 = _mm_sub_epi32(a2, b2);
+
+	r0 = _mm_and_si128(_mm_shuffle_epi32(a0, _MM_SHUFFLE(2,2,0,0)), bot32bitmask.v);
+	r1 = _mm_and_si128(_mm_shuffle_epi32(a0, _MM_SHUFFLE(3,3,1,1)), bot32bitmask.v);
+
+	c1 = _mm_srli_epi32(r0, 26);
+	c2 = _mm_srli_epi32(r1, 25);
+	r0 = _mm_and_si128(r0, packedmask26.v);
+	r1 = _mm_and_si128(r1, packedmask25.v);
+	r0 = _mm_add_epi32(r0, _mm_slli_si128(c2, 8));
+	r1 = _mm_add_epi32(r1, c1);
+
+	a0 = _mm_unpacklo_epi64(_mm_unpacklo_epi32(r0, r1), _mm_unpackhi_epi32(r0, r1));
+	a1 = _mm_add_epi32(a1, _mm_srli_si128(c2, 8));
+
+	_mm_store_si128((xmmi*)out + 0, a0);
+	_mm_store_si128((xmmi*)out + 1, a1);
+	_mm_store_si128((xmmi*)out + 2, a2);
+}
+
+/* out = a - b where a and/or b are unreduced results of a basic op:
+   adds 4p (instead of 2p) before subtracting, then runs the same full
+   deinterleave/carry/reinterleave pass as curve25519_add_reduce. */
+DONNA_INLINE static void
+curve25519_sub_after_basic(bignum25519 out, const bignum25519 a, const bignum25519 b) {
+	xmmi a0,a1,a2,b0,b1,b2;
+	xmmi c1,c2,c3;
+	xmmi r0,r1,r2,r3,r4,r5;
+
+	a0 = _mm_load_si128((xmmi*)a + 0);
+	a1 = _mm_load_si128((xmmi*)a + 1);
+	a2 = _mm_load_si128((xmmi*)a + 2);
+	a0 = _mm_add_epi32(a0, packed4p0.v);
+	a1 = _mm_add_epi32(a1, packed4p1.v);
+	a2 = _mm_add_epi32(a2, packed4p2.v);
+	b0 = _mm_load_si128((xmmi*)b + 0);
+	b1 = _mm_load_si128((xmmi*)b + 1);
+	b2 = _mm_load_si128((xmmi*)b + 2);
+	a0 = _mm_sub_epi32(a0, b0);
+	a1 = _mm_sub_epi32(a1, b1);
+	a2 = _mm_sub_epi32(a2, b2);
+
+	r0 = _mm_and_si128(_mm_unpacklo_epi64(a0, a1), bot32bitmask.v);
+	r1 = _mm_srli_epi64(_mm_unpacklo_epi64(a0, a1), 32);
+	r2 = _mm_and_si128(_mm_unpackhi_epi64(a0, a1), bot32bitmask.v);
+	r3 = _mm_srli_epi64(_mm_unpackhi_epi64(a0, a1), 32);
+	r4 = _mm_and_si128(_mm_unpacklo_epi64(_mm_setzero_si128(), a2), bot32bitmask.v);
+	r5 = _mm_srli_epi64(_mm_unpacklo_epi64(_mm_setzero_si128(), a2), 32);
+
+	c1 = _mm_srli_epi64(r0, 26); c2 = _mm_srli_epi64(r2, 26); r0 = _mm_and_si128(r0, packedmask26.v); r2 = _mm_and_si128(r2, packedmask26.v); r1 = _mm_add_epi64(r1, c1); r3 = _mm_add_epi64(r3, c2);
+	c1 = _mm_srli_epi64(r1, 25); c2 = _mm_srli_epi64(r3, 25); r1 = _mm_and_si128(r1, packedmask25.v); r3 = _mm_and_si128(r3, packedmask25.v); r2 = _mm_add_epi64(r2, c1); r4 = _mm_add_epi64(r4, c2); c3 = _mm_slli_si128(c2, 8);
+	c1 = _mm_srli_epi64(r4, 26); r4 = _mm_and_si128(r4, packedmask26.v); r5 = _mm_add_epi64(r5, c1);
+	c1 = _mm_srli_epi64(r5, 25); r5 = _mm_and_si128(r5, packedmask25.v); r0 = _mm_add_epi64(r0, _mm_unpackhi_epi64(_mm_mul_epu32(c1, packednineteen.v), c3));
+	c1 = _mm_srli_epi64(r0, 26); c2 = _mm_srli_epi64(r2, 26); r0 = _mm_and_si128(r0, packedmask26.v); r2 = _mm_and_si128(r2, packedmask26.v); r1 = _mm_add_epi64(r1, c1); r3 = _mm_add_epi64(r3, c2);
+
+	_mm_store_si128((xmmi*)out + 0, _mm_unpacklo_epi64(_mm_unpacklo_epi32(r0, r1), _mm_unpacklo_epi32(r2, r3)));
+	_mm_store_si128((xmmi*)out + 1, _mm_unpacklo_epi64(_mm_unpackhi_epi32(r0, r1), _mm_unpackhi_epi32(r2, r3)));
+	_mm_store_si128((xmmi*)out + 2, _mm_unpackhi_epi32(r4, r5));
+}
+
+/* out = a - b (as (a + 2p) - b) with a full carry pass, same
+   deinterleave/carry/reinterleave structure as curve25519_add_reduce. */
+DONNA_INLINE static void
+curve25519_sub_reduce(bignum25519 out, const bignum25519 a, const bignum25519 b) {
+	xmmi a0,a1,a2,b0,b1,b2;
+	xmmi c1,c2,c3;
+	xmmi r0,r1,r2,r3,r4,r5;
+
+	a0 = _mm_load_si128((xmmi*)a + 0);
+	a1 = _mm_load_si128((xmmi*)a + 1);
+	a2 = _mm_load_si128((xmmi*)a + 2);
+	a0 = _mm_add_epi32(a0, packed2p0.v);
+	a1 = _mm_add_epi32(a1, packed2p1.v);
+	a2 = _mm_add_epi32(a2, packed2p2.v);
+	b0 = _mm_load_si128((xmmi*)b + 0);
+	b1 = _mm_load_si128((xmmi*)b + 1);
+	b2 = _mm_load_si128((xmmi*)b + 2);
+	a0 = _mm_sub_epi32(a0, b0);
+	a1 = _mm_sub_epi32(a1, b1);
+	a2 = _mm_sub_epi32(a2, b2);
+
+	r0 = _mm_and_si128(_mm_unpacklo_epi64(a0, a1), bot32bitmask.v);
+	r1 = _mm_srli_epi64(_mm_unpacklo_epi64(a0, a1), 32);
+	r2 = _mm_and_si128(_mm_unpackhi_epi64(a0, a1), bot32bitmask.v);
+	r3 = _mm_srli_epi64(_mm_unpackhi_epi64(a0, a1), 32);
+	r4 = _mm_and_si128(_mm_unpacklo_epi64(_mm_setzero_si128(), a2), bot32bitmask.v);
+	r5 = _mm_srli_epi64(_mm_unpacklo_epi64(_mm_setzero_si128(), a2), 32);
+
+	c1 = _mm_srli_epi64(r0, 26); c2 = _mm_srli_epi64(r2, 26); r0 = _mm_and_si128(r0, packedmask26.v); r2 = _mm_and_si128(r2, packedmask26.v); r1 = _mm_add_epi64(r1, c1); r3 = _mm_add_epi64(r3, c2);
+	c1 = _mm_srli_epi64(r1, 25); c2 = _mm_srli_epi64(r3, 25); r1 = _mm_and_si128(r1, packedmask25.v); r3 = _mm_and_si128(r3, packedmask25.v); r2 = _mm_add_epi64(r2, c1); r4 = _mm_add_epi64(r4, c2); c3 = _mm_slli_si128(c2, 8);
+	c1 = _mm_srli_epi64(r4, 26); r4 = _mm_and_si128(r4, packedmask26.v); r5 = _mm_add_epi64(r5, c1);
+	c1 = _mm_srli_epi64(r5, 25); r5 = _mm_and_si128(r5, packedmask25.v); r0 = _mm_add_epi64(r0, _mm_unpackhi_epi64(_mm_mul_epu32(c1, packednineteen.v), c3));
+	c1 = _mm_srli_epi64(r0, 26); c2 = _mm_srli_epi64(r2, 26); r0 = _mm_and_si128(r0, packedmask26.v); r2 = _mm_and_si128(r2, packedmask26.v); r1 = _mm_add_epi64(r1, c1); r3 = _mm_add_epi64(r3, c2);
+
+	_mm_store_si128((xmmi*)out + 0, _mm_unpacklo_epi64(_mm_unpacklo_epi32(r0, r1), _mm_unpacklo_epi32(r2, r3)));
+	_mm_store_si128((xmmi*)out + 1, _mm_unpacklo_epi64(_mm_unpackhi_epi32(r0, r1), _mm_unpackhi_epi32(r2, r3)));
+	_mm_store_si128((xmmi*)out + 2, _mm_unpackhi_epi32(r4, r5));
+}
+
+
+/* out = -b, computed as 2p - b with a full carry pass (same
+   deinterleave/carry/reinterleave structure as curve25519_add_reduce). */
+DONNA_INLINE static void
+curve25519_neg(bignum25519 out, const bignum25519 b) {
+	xmmi a0,a1,a2,b0,b1,b2;
+	xmmi c1,c2,c3;
+	xmmi r0,r1,r2,r3,r4,r5;
+
+	a0 = packed2p0.v;
+	a1 = packed2p1.v;
+	a2 = packed2p2.v;
+	b0 = _mm_load_si128((xmmi*)b + 0);
+	b1 = _mm_load_si128((xmmi*)b + 1);
+	b2 = _mm_load_si128((xmmi*)b + 2);
+	a0 = _mm_sub_epi32(a0, b0);
+	a1 = _mm_sub_epi32(a1, b1);
+	a2 = _mm_sub_epi32(a2, b2);
+
+	r0 = _mm_and_si128(_mm_unpacklo_epi64(a0, a1), bot32bitmask.v);
+	r1 = _mm_srli_epi64(_mm_unpacklo_epi64(a0, a1), 32);
+	r2 = _mm_and_si128(_mm_unpackhi_epi64(a0, a1), bot32bitmask.v);
+	r3 = _mm_srli_epi64(_mm_unpackhi_epi64(a0, a1), 32);
+	r4 = _mm_and_si128(_mm_unpacklo_epi64(_mm_setzero_si128(), a2), bot32bitmask.v);
+	r5 = _mm_srli_epi64(_mm_unpacklo_epi64(_mm_setzero_si128(), a2), 32);
+
+	c1 = _mm_srli_epi64(r0, 26); c2 = _mm_srli_epi64(r2, 26); r0 = _mm_and_si128(r0, packedmask26.v); r2 = _mm_and_si128(r2, packedmask26.v); r1 = _mm_add_epi64(r1, c1); r3 = _mm_add_epi64(r3, c2);
+	c1 = _mm_srli_epi64(r1, 25); c2 = _mm_srli_epi64(r3, 25); r1 = _mm_and_si128(r1, packedmask25.v); r3 = _mm_and_si128(r3, packedmask25.v); r2 = _mm_add_epi64(r2, c1); r4 = _mm_add_epi64(r4, c2); c3 = _mm_slli_si128(c2, 8);
+	c1 = _mm_srli_epi64(r4, 26); r4 = _mm_and_si128(r4, packedmask26.v); r5 = _mm_add_epi64(r5, c1);
+	c1 = _mm_srli_epi64(r5, 25); r5 = _mm_and_si128(r5, packedmask25.v); r0 = _mm_add_epi64(r0, _mm_unpackhi_epi64(_mm_mul_epu32(c1, packednineteen.v), c3));
+	c1 = _mm_srli_epi64(r0, 26); c2 = _mm_srli_epi64(r2, 26); r0 = _mm_and_si128(r0, packedmask26.v); r2 = _mm_and_si128(r2, packedmask26.v); r1 = _mm_add_epi64(r1, c1); r3 = _mm_add_epi64(r3, c2);
+
+	_mm_store_si128((xmmi*)out + 0, _mm_unpacklo_epi64(_mm_unpacklo_epi32(r0, r1), _mm_unpacklo_epi32(r2, r3)));
+	_mm_store_si128((xmmi*)out + 1, _mm_unpacklo_epi64(_mm_unpackhi_epi32(r0, r1), _mm_unpackhi_epi32(r2, r3)));
+	_mm_store_si128((xmmi*)out + 2, _mm_unpackhi_epi32(r4, r5));
+}
+
+
+/* Multiply two field elements: out = r * s (mod 2^255 - 19) */
+static void
+curve25519_mul(bignum25519 out, const bignum25519 r, const bignum25519 s) { /* ten limbs, radix 2^25.5: even limbs 26 bits, odd limbs 25 bits */
+ xmmi m01,m23,m45,m67,m89;
+ xmmi m0123,m4567;
+ xmmi s0123,s4567;
+ xmmi s01,s23,s45,s67,s89;
+ xmmi s12,s34,s56,s78,s9;
+ xmmi r0,r2,r4,r6,r8;
+ xmmi r1,r3,r5,r7,r9;
+ xmmi r119,r219,r319,r419,r519,r619,r719,r819,r919;
+ xmmi c1,c2,c3;
+
+ s0123 = _mm_load_si128((xmmi*)s + 0); /* s limbs 0-3 */
+ s01 = _mm_shuffle_epi32(s0123,_MM_SHUFFLE(3,1,2,0));
+ s12 = _mm_shuffle_epi32(s0123, _MM_SHUFFLE(2,2,1,1));
+ s23 = _mm_shuffle_epi32(s0123,_MM_SHUFFLE(3,3,2,2));
+ s4567 = _mm_load_si128((xmmi*)s + 1); /* s limbs 4-7 */
+ s34 = _mm_unpacklo_epi64(_mm_srli_si128(s0123,12),s4567);
+ s45 = _mm_shuffle_epi32(s4567,_MM_SHUFFLE(3,1,2,0));
+ s56 = _mm_shuffle_epi32(s4567, _MM_SHUFFLE(2,2,1,1));
+ s67 = _mm_shuffle_epi32(s4567,_MM_SHUFFLE(3,3,2,2));
+ s89 = _mm_load_si128((xmmi*)s + 2); /* s limbs 8-9 */
+ s78 = _mm_unpacklo_epi64(_mm_srli_si128(s4567,12),s89);
+ s89 = _mm_shuffle_epi32(s89,_MM_SHUFFLE(3,1,2,0));
+ s9 = _mm_shuffle_epi32(s89, _MM_SHUFFLE(3,3,2,2));
+
+ r0 = _mm_load_si128((xmmi*)r + 0);
+ r1 = _mm_shuffle_epi32(r0, _MM_SHUFFLE(1,1,1,1));
+ r1 = _mm_add_epi64(r1, _mm_and_si128(r1, top64bitmask.v)); /* high 64-bit lane doubled: odd*odd limb products carry a factor of 2 */
+ r2 = _mm_shuffle_epi32(r0, _MM_SHUFFLE(2,2,2,2));
+ r3 = _mm_shuffle_epi32(r0, _MM_SHUFFLE(3,3,3,3));
+ r3 = _mm_add_epi64(r3, _mm_and_si128(r3, top64bitmask.v));
+ r0 = _mm_shuffle_epi32(r0, _MM_SHUFFLE(0,0,0,0));
+ r4 = _mm_load_si128((xmmi*)r + 1);
+ r5 = _mm_shuffle_epi32(r4, _MM_SHUFFLE(1,1,1,1));
+ r5 = _mm_add_epi64(r5, _mm_and_si128(r5, top64bitmask.v));
+ r6 = _mm_shuffle_epi32(r4, _MM_SHUFFLE(2,2,2,2));
+ r7 = _mm_shuffle_epi32(r4, _MM_SHUFFLE(3,3,3,3));
+ r7 = _mm_add_epi64(r7, _mm_and_si128(r7, top64bitmask.v));
+ r4 = _mm_shuffle_epi32(r4, _MM_SHUFFLE(0,0,0,0));
+ r8 = _mm_load_si128((xmmi*)r + 2);
+ r9 = _mm_shuffle_epi32(r8, _MM_SHUFFLE(3,1,3,1));
+ r9 = _mm_add_epi64(r9, _mm_and_si128(r9, top64bitmask.v));
+ r8 = _mm_shuffle_epi32(r8, _MM_SHUFFLE(3,0,3,0));
+ /* partial products with the odd limbs of r */
+ m01 = _mm_mul_epu32(r1,s01);
+ m23 = _mm_mul_epu32(r1,s23);
+ m45 = _mm_mul_epu32(r1,s45);
+ m67 = _mm_mul_epu32(r1,s67);
+ m23 = _mm_add_epi64(m23,_mm_mul_epu32(r3,s01));
+ m45 = _mm_add_epi64(m45,_mm_mul_epu32(r3,s23));
+ m67 = _mm_add_epi64(m67,_mm_mul_epu32(r3,s45));
+ m89 = _mm_mul_epu32(r1,s89);
+ m45 = _mm_add_epi64(m45,_mm_mul_epu32(r5,s01));
+ m67 = _mm_add_epi64(m67,_mm_mul_epu32(r5,s23));
+ m89 = _mm_add_epi64(m89,_mm_mul_epu32(r3,s67));
+ m67 = _mm_add_epi64(m67,_mm_mul_epu32(r7,s01));
+ m89 = _mm_add_epi64(m89,_mm_mul_epu32(r5,s45));
+ m89 = _mm_add_epi64(m89,_mm_mul_epu32(r7,s23));
+ m89 = _mm_add_epi64(m89,_mm_mul_epu32(r9,s01));
+
+ /* shift up: move each accumulator pair up one 64-bit slot (the odd-r products land one limb low) */
+ m89 = _mm_unpackhi_epi64(m67,_mm_slli_si128(m89,8));
+ m67 = _mm_unpackhi_epi64(m45,_mm_slli_si128(m67,8));
+ m45 = _mm_unpackhi_epi64(m23,_mm_slli_si128(m45,8));
+ m23 = _mm_unpackhi_epi64(m01,_mm_slli_si128(m23,8));
+ m01 = _mm_unpackhi_epi64(_mm_setzero_si128(),_mm_slli_si128(m01,8));
+ /* partial products with the even limbs of r */
+ m01 = _mm_add_epi64(m01,_mm_mul_epu32(r0,s01));
+ m23 = _mm_add_epi64(m23,_mm_mul_epu32(r0,s23));
+ m45 = _mm_add_epi64(m45,_mm_mul_epu32(r0,s45));
+ m67 = _mm_add_epi64(m67,_mm_mul_epu32(r0,s67));
+ m23 = _mm_add_epi64(m23,_mm_mul_epu32(r2,s01));
+ m45 = _mm_add_epi64(m45,_mm_mul_epu32(r2,s23));
+ m67 = _mm_add_epi64(m67,_mm_mul_epu32(r4,s23));
+ m89 = _mm_add_epi64(m89,_mm_mul_epu32(r0,s89));
+ m45 = _mm_add_epi64(m45,_mm_mul_epu32(r4,s01));
+ m67 = _mm_add_epi64(m67,_mm_mul_epu32(r2,s45));
+ m89 = _mm_add_epi64(m89,_mm_mul_epu32(r2,s67));
+ m67 = _mm_add_epi64(m67,_mm_mul_epu32(r6,s01));
+ m89 = _mm_add_epi64(m89,_mm_mul_epu32(r4,s45));
+ m89 = _mm_add_epi64(m89,_mm_mul_epu32(r6,s23));
+ m89 = _mm_add_epi64(m89,_mm_mul_epu32(r8,s01));
+ /* 19*r_i factors: product terms past limb 9 fold back, since 2^255 = 19 (mod p) */
+ r219 = _mm_mul_epu32(r2, packednineteen.v);
+ r419 = _mm_mul_epu32(r4, packednineteen.v);
+ r619 = _mm_mul_epu32(r6, packednineteen.v);
+ r819 = _mm_mul_epu32(r8, packednineteen.v);
+ r119 = _mm_shuffle_epi32(r1,_MM_SHUFFLE(0,0,2,2)); r119 = _mm_mul_epu32(r119, packednineteen.v);
+ r319 = _mm_shuffle_epi32(r3,_MM_SHUFFLE(0,0,2,2)); r319 = _mm_mul_epu32(r319, packednineteen.v);
+ r519 = _mm_shuffle_epi32(r5,_MM_SHUFFLE(0,0,2,2)); r519 = _mm_mul_epu32(r519, packednineteen.v);
+ r719 = _mm_shuffle_epi32(r7,_MM_SHUFFLE(0,0,2,2)); r719 = _mm_mul_epu32(r719, packednineteen.v);
+ r919 = _mm_shuffle_epi32(r9,_MM_SHUFFLE(0,0,2,2)); r919 = _mm_mul_epu32(r919, packednineteen.v);
+
+ m01 = _mm_add_epi64(m01,_mm_mul_epu32(r919,s12));
+ m23 = _mm_add_epi64(m23,_mm_mul_epu32(r919,s34));
+ m45 = _mm_add_epi64(m45,_mm_mul_epu32(r919,s56));
+ m67 = _mm_add_epi64(m67,_mm_mul_epu32(r919,s78));
+ m01 = _mm_add_epi64(m01,_mm_mul_epu32(r719,s34));
+ m23 = _mm_add_epi64(m23,_mm_mul_epu32(r719,s56));
+ m45 = _mm_add_epi64(m45,_mm_mul_epu32(r719,s78));
+ m67 = _mm_add_epi64(m67,_mm_mul_epu32(r719,s9));
+ m01 = _mm_add_epi64(m01,_mm_mul_epu32(r519,s56));
+ m23 = _mm_add_epi64(m23,_mm_mul_epu32(r519,s78));
+ m45 = _mm_add_epi64(m45,_mm_mul_epu32(r519,s9));
+ m67 = _mm_add_epi64(m67,_mm_mul_epu32(r819,s89));
+ m01 = _mm_add_epi64(m01,_mm_mul_epu32(r319,s78));
+ m23 = _mm_add_epi64(m23,_mm_mul_epu32(r319,s9));
+ m45 = _mm_add_epi64(m45,_mm_mul_epu32(r619,s89));
+ m89 = _mm_add_epi64(m89,_mm_mul_epu32(r919,s9));
+ m01 = _mm_add_epi64(m01,_mm_mul_epu32(r819,s23));
+ m23 = _mm_add_epi64(m23,_mm_mul_epu32(r819,s45));
+ m45 = _mm_add_epi64(m45,_mm_mul_epu32(r819,s67));
+ m01 = _mm_add_epi64(m01,_mm_mul_epu32(r619,s45));
+ m23 = _mm_add_epi64(m23,_mm_mul_epu32(r619,s67));
+ m01 = _mm_add_epi64(m01,_mm_mul_epu32(r419,s67));
+ m23 = _mm_add_epi64(m23,_mm_mul_epu32(r419,s89));
+ m01 = _mm_add_epi64(m01,_mm_mul_epu32(r219,s89));
+ m01 = _mm_add_epi64(m01,_mm_mul_epu32(r119,s9));
+ /* regroup the ten 64-bit sums for carrying */
+ r0 = _mm_unpacklo_epi64(m01, m45);
+ r1 = _mm_unpackhi_epi64(m01, m45);
+ r2 = _mm_unpacklo_epi64(m23, m67);
+ r3 = _mm_unpackhi_epi64(m23, m67);
+ r4 = _mm_unpacklo_epi64(m89, m89);
+ r5 = _mm_unpackhi_epi64(m89, m89);
+ /* carry chain: alternating mod-2^26 / mod-2^25 passes; the limb-9 overflow wraps into limb 0 times 19 */
+ c1 = _mm_srli_epi64(r0, 26); c2 = _mm_srli_epi64(r2, 26); r0 = _mm_and_si128(r0, packedmask26.v); r2 = _mm_and_si128(r2, packedmask26.v); r1 = _mm_add_epi64(r1, c1); r3 = _mm_add_epi64(r3, c2);
+ c1 = _mm_srli_epi64(r1, 25); c2 = _mm_srli_epi64(r3, 25); r1 = _mm_and_si128(r1, packedmask25.v); r3 = _mm_and_si128(r3, packedmask25.v); r2 = _mm_add_epi64(r2, c1); r4 = _mm_add_epi64(r4, c2); c3 = _mm_slli_si128(c2, 8);
+ c1 = _mm_srli_epi64(r4, 26); r4 = _mm_and_si128(r4, packedmask26.v); r5 = _mm_add_epi64(r5, c1);
+ c1 = _mm_srli_epi64(r5, 25); r5 = _mm_and_si128(r5, packedmask25.v); r0 = _mm_add_epi64(r0, _mm_unpackhi_epi64(_mm_mul_epu32(c1, packednineteen.v), c3));
+ c1 = _mm_srli_epi64(r0, 26); c2 = _mm_srli_epi64(r2, 26); r0 = _mm_and_si128(r0, packedmask26.v); r2 = _mm_and_si128(r2, packedmask26.v); r1 = _mm_add_epi64(r1, c1); r3 = _mm_add_epi64(r3, c2);
+ /* repack to 32-bit limbs and store */
+ m0123 = _mm_unpacklo_epi32(r0, r1);
+ m4567 = _mm_unpackhi_epi32(r0, r1);
+ m0123 = _mm_unpacklo_epi64(m0123, _mm_unpacklo_epi32(r2, r3));
+ m4567 = _mm_unpacklo_epi64(m4567, _mm_unpackhi_epi32(r2, r3));
+ m89 = _mm_unpackhi_epi32(r4, r5);
+
+ _mm_store_si128((xmmi*)out + 0, m0123);
+ _mm_store_si128((xmmi*)out + 1, m4567);
+ _mm_store_si128((xmmi*)out + 2, m89);
+}
+
+DONNA_NOINLINE static void
+curve25519_mul_noinline(bignum25519 out, const bignum25519 r, const bignum25519 s) { /* out = r * s, via a forced out-of-line call (DONNA_NOINLINE) */
+ curve25519_mul(out, r, s);
+}
+
+#define curve25519_square(r, n) curve25519_square_times(r, n, 1) /* single squaring */
+static void
+curve25519_square_times(bignum25519 r, const bignum25519 in, int count) { /* r = in^(2^count); count must be >= 1 (do/while runs at least once) */
+ xmmi m01,m23,m45,m67,m89;
+ xmmi r0,r1,r2,r3,r4,r5,r6,r7,r8,r9;
+ xmmi r0a,r1a,r2a,r3a,r7a,r9a;
+ xmmi r0123,r4567;
+ xmmi r01,r23,r45,r67,r6x,r89,r8x;
+ xmmi r12,r34,r56,r78,r9x;
+ xmmi r5619;
+ xmmi c1,c2,c3;
+ /* load the ten 32-bit limbs of in */
+ r0123 = _mm_load_si128((xmmi*)in + 0);
+ r01 = _mm_shuffle_epi32(r0123,_MM_SHUFFLE(3,1,2,0));
+ r23 = _mm_shuffle_epi32(r0123,_MM_SHUFFLE(3,3,2,2));
+ r4567 = _mm_load_si128((xmmi*)in + 1);
+ r45 = _mm_shuffle_epi32(r4567,_MM_SHUFFLE(3,1,2,0));
+ r67 = _mm_shuffle_epi32(r4567,_MM_SHUFFLE(3,3,2,2));
+ r89 = _mm_load_si128((xmmi*)in + 2);
+ r89 = _mm_shuffle_epi32(r89,_MM_SHUFFLE(3,1,2,0));
+ /* each iteration performs one modular squaring */
+ do {
+ r12 = _mm_unpackhi_epi64(r01, _mm_slli_si128(r23, 8));
+ r0 = _mm_shuffle_epi32(r01, _MM_SHUFFLE(0,0,0,0));
+ r0 = _mm_add_epi64(r0, _mm_and_si128(r0, top64bitmask.v));
+ r0a = _mm_shuffle_epi32(r0,_MM_SHUFFLE(3,2,1,2));
+ r1 = _mm_shuffle_epi32(r01, _MM_SHUFFLE(2,2,2,2));
+ r2 = _mm_shuffle_epi32(r23, _MM_SHUFFLE(0,0,0,0));
+ r2 = _mm_add_epi64(r2, _mm_and_si128(r2, top64bitmask.v));
+ r2a = _mm_shuffle_epi32(r2,_MM_SHUFFLE(3,2,1,2));
+ r3 = _mm_shuffle_epi32(r23, _MM_SHUFFLE(2,2,2,2));
+ r34 = _mm_unpackhi_epi64(r23, _mm_slli_si128(r45, 8));
+ r4 = _mm_shuffle_epi32(r45, _MM_SHUFFLE(0,0,0,0));
+ r4 = _mm_add_epi64(r4, _mm_and_si128(r4, top64bitmask.v));
+ r56 = _mm_unpackhi_epi64(r45, _mm_slli_si128(r67, 8));
+ r5619 = _mm_mul_epu32(r56, packednineteen.v); /* pre-multiplied *19 fold-back terms */
+ r5 = _mm_shuffle_epi32(r5619, _MM_SHUFFLE(1,1,1,0));
+ r6 = _mm_shuffle_epi32(r5619, _MM_SHUFFLE(3,2,3,2));
+ r78 = _mm_unpackhi_epi64(r67, _mm_slli_si128(r89, 8));
+ r6x = _mm_unpacklo_epi64(r67, _mm_setzero_si128());
+ r7 = _mm_shuffle_epi32(r67, _MM_SHUFFLE(2,2,2,2));
+ r7 = _mm_mul_epu32(r7, packed3819.v);
+ r7a = _mm_shuffle_epi32(r7, _MM_SHUFFLE(3,3,3,2));
+ r8x = _mm_unpacklo_epi64(r89, _mm_setzero_si128());
+ r8 = _mm_shuffle_epi32(r89, _MM_SHUFFLE(0,0,0,0));
+ r8 = _mm_mul_epu32(r8, packednineteen.v);
+ r9 = _mm_shuffle_epi32(r89, _MM_SHUFFLE(2,2,2,2));
+ r9x = _mm_slli_epi32(_mm_shuffle_epi32(r89, _MM_SHUFFLE(3,3,3,2)), 1);
+ r9 = _mm_mul_epu32(r9, packed3819.v);
+ r9a = _mm_shuffle_epi32(r9, _MM_SHUFFLE(2,2,2,2));
+
+ m01 = _mm_mul_epu32(r01, r0);
+ m23 = _mm_mul_epu32(r23, r0a);
+ m45 = _mm_mul_epu32(r45, r0a);
+ m45 = _mm_add_epi64(m45, _mm_mul_epu32(r23, r2));
+ r23 = _mm_slli_epi32(r23, 1);
+ m67 = _mm_mul_epu32(r67, r0a);
+ m67 = _mm_add_epi64(m67, _mm_mul_epu32(r45, r2a));
+ m89 = _mm_mul_epu32(r89, r0a);
+ m89 = _mm_add_epi64(m89, _mm_mul_epu32(r67, r2a));
+ r67 = _mm_slli_epi32(r67, 1);
+ m89 = _mm_add_epi64(m89, _mm_mul_epu32(r45, r4));
+ r45 = _mm_slli_epi32(r45, 1);
+
+ r1 = _mm_slli_epi32(r1, 1);
+ r3 = _mm_slli_epi32(r3, 1);
+ r1a = _mm_add_epi64(r1, _mm_and_si128(r1, bot64bitmask.v));
+ r3a = _mm_add_epi64(r3, _mm_and_si128(r3, bot64bitmask.v));
+
+ m23 = _mm_add_epi64(m23, _mm_mul_epu32(r12, r1));
+ m45 = _mm_add_epi64(m45, _mm_mul_epu32(r34, r1a));
+ m67 = _mm_add_epi64(m67, _mm_mul_epu32(r56, r1a));
+ m67 = _mm_add_epi64(m67, _mm_mul_epu32(r34, r3));
+ r34 = _mm_slli_epi32(r34, 1);
+ m89 = _mm_add_epi64(m89, _mm_mul_epu32(r78, r1a));
+ r78 = _mm_slli_epi32(r78, 1);
+ m89 = _mm_add_epi64(m89, _mm_mul_epu32(r56, r3a));
+ r56 = _mm_slli_epi32(r56, 1);
+
+ m01 = _mm_add_epi64(m01, _mm_mul_epu32(_mm_slli_epi32(r12, 1), r9));
+ m01 = _mm_add_epi64(m01, _mm_mul_epu32(r34, r7));
+ m23 = _mm_add_epi64(m23, _mm_mul_epu32(r34, r9));
+ m01 = _mm_add_epi64(m01, _mm_mul_epu32(r56, r5));
+ m23 = _mm_add_epi64(m23, _mm_mul_epu32(r56, r7));
+ m45 = _mm_add_epi64(m45, _mm_mul_epu32(r56, r9));
+ m01 = _mm_add_epi64(m01, _mm_mul_epu32(r23, r8));
+ m01 = _mm_add_epi64(m01, _mm_mul_epu32(r45, r6));
+ m23 = _mm_add_epi64(m23, _mm_mul_epu32(r45, r8));
+ m23 = _mm_add_epi64(m23, _mm_mul_epu32(r6x, r6));
+ m45 = _mm_add_epi64(m45, _mm_mul_epu32(r78, r7a));
+ m67 = _mm_add_epi64(m67, _mm_mul_epu32(r78, r9));
+ m45 = _mm_add_epi64(m45, _mm_mul_epu32(r67, r8));
+ m67 = _mm_add_epi64(m67, _mm_mul_epu32(r8x, r8));
+ m89 = _mm_add_epi64(m89, _mm_mul_epu32(r9x, r9a));
+ /* regroup the ten 64-bit sums for the carry chain */
+ r0 = _mm_unpacklo_epi64(m01, m45);
+ r1 = _mm_unpackhi_epi64(m01, m45);
+ r2 = _mm_unpacklo_epi64(m23, m67);
+ r3 = _mm_unpackhi_epi64(m23, m67);
+ r4 = _mm_unpacklo_epi64(m89, m89);
+ r5 = _mm_unpackhi_epi64(m89, m89);
+ /* carry chain (same structure as curve25519_mul): limb-9 overflow wraps into limb 0 times 19 */
+ c1 = _mm_srli_epi64(r0, 26); c2 = _mm_srli_epi64(r2, 26); r0 = _mm_and_si128(r0, packedmask26.v); r2 = _mm_and_si128(r2, packedmask26.v); r1 = _mm_add_epi64(r1, c1); r3 = _mm_add_epi64(r3, c2);
+ c1 = _mm_srli_epi64(r1, 25); c2 = _mm_srli_epi64(r3, 25); r1 = _mm_and_si128(r1, packedmask25.v); r3 = _mm_and_si128(r3, packedmask25.v); r2 = _mm_add_epi64(r2, c1); r4 = _mm_add_epi64(r4, c2); c3 = _mm_slli_si128(c2, 8);
+ c1 = _mm_srli_epi64(r4, 26); r4 = _mm_and_si128(r4, packedmask26.v); r5 = _mm_add_epi64(r5, c1);
+ c1 = _mm_srli_epi64(r5, 25); r5 = _mm_and_si128(r5, packedmask25.v); r0 = _mm_add_epi64(r0, _mm_unpackhi_epi64(_mm_mul_epu32(c1, packednineteen.v), c3));
+ c1 = _mm_srli_epi64(r0, 26); c2 = _mm_srli_epi64(r2, 26); r0 = _mm_and_si128(r0, packedmask26.v); r2 = _mm_and_si128(r2, packedmask26.v); r1 = _mm_add_epi64(r1, c1); r3 = _mm_add_epi64(r3, c2);
+
+ r01 = _mm_unpacklo_epi64(r0, r1);
+ r45 = _mm_unpackhi_epi64(r0, r1);
+ r23 = _mm_unpacklo_epi64(r2, r3);
+ r67 = _mm_unpackhi_epi64(r2, r3);
+ r89 = _mm_unpackhi_epi64(r4, r5);
+ } while (--count);
+ /* repack into the 32-bit limb layout and store */
+ r0123 = _mm_shuffle_epi32(r23, _MM_SHUFFLE(2,0,3,3));
+ r4567 = _mm_shuffle_epi32(r67, _MM_SHUFFLE(2,0,3,3));
+ r0123 = _mm_or_si128(r0123, _mm_shuffle_epi32(r01, _MM_SHUFFLE(3,3,2,0)));
+ r4567 = _mm_or_si128(r4567, _mm_shuffle_epi32(r45, _MM_SHUFFLE(3,3,2,0)));
+ r89 = _mm_shuffle_epi32(r89, _MM_SHUFFLE(3,3,2,0));
+
+ _mm_store_si128((xmmi*)r + 0, r0123);
+ _mm_store_si128((xmmi*)r + 1, r4567);
+ _mm_store_si128((xmmi*)r + 2, r89);
+}
+
+DONNA_INLINE static void
+curve25519_tangle32(packedelem32 *out, const bignum25519 x, const bignum25519 z) { /* interleave: out[i] = { x[2i], z[2i], x[2i+1], z[2i+1] } */
+ xmmi x0,x1,x2,z0,z1,z2;
+
+ x0 = _mm_load_si128((xmmi *)(x + 0));
+ x1 = _mm_load_si128((xmmi *)(x + 4));
+ x2 = _mm_load_si128((xmmi *)(x + 8));
+ z0 = _mm_load_si128((xmmi *)(z + 0));
+ z1 = _mm_load_si128((xmmi *)(z + 4));
+ z2 = _mm_load_si128((xmmi *)(z + 8));
+
+ out[0].v = _mm_unpacklo_epi32(x0, z0);
+ out[1].v = _mm_unpackhi_epi32(x0, z0);
+ out[2].v = _mm_unpacklo_epi32(x1, z1);
+ out[3].v = _mm_unpackhi_epi32(x1, z1);
+ out[4].v = _mm_unpacklo_epi32(x2, z2);
+}
+
+DONNA_INLINE static void
+curve25519_untangle32(bignum25519 x, bignum25519 z, const packedelem32 *in) { /* inverse of curve25519_tangle32: split interleaved limbs back into x and z */
+ xmmi t0,t1,t2,t3,t4,zero;
+
+ t0 = _mm_shuffle_epi32(in[0].v, _MM_SHUFFLE(3,1,2,0));
+ t1 = _mm_shuffle_epi32(in[1].v, _MM_SHUFFLE(3,1,2,0));
+ t2 = _mm_shuffle_epi32(in[2].v, _MM_SHUFFLE(3,1,2,0));
+ t3 = _mm_shuffle_epi32(in[3].v, _MM_SHUFFLE(3,1,2,0));
+ t4 = _mm_shuffle_epi32(in[4].v, _MM_SHUFFLE(3,1,2,0));
+ zero = _mm_setzero_si128();
+ _mm_store_si128((xmmi *)x + 0, _mm_unpacklo_epi64(t0, t1));
+ _mm_store_si128((xmmi *)x + 1, _mm_unpacklo_epi64(t2, t3));
+ _mm_store_si128((xmmi *)x + 2, _mm_unpacklo_epi64(t4, zero));
+ _mm_store_si128((xmmi *)z + 0, _mm_unpackhi_epi64(t0, t1));
+ _mm_store_si128((xmmi *)z + 1, _mm_unpackhi_epi64(t2, t3));
+ _mm_store_si128((xmmi *)z + 2, _mm_unpackhi_epi64(t4, zero));
+}
+
+DONNA_INLINE static void
+curve25519_add_reduce_packed32(packedelem32 *out, const packedelem32 *r, const packedelem32 *s) { /* out = r + s on tangled pairs, followed by a full carry reduction */
+ xmmi r0,r1,r2,r3,r4;
+ xmmi s0,s1,s2,s3,s4,s5;
+ xmmi c1,c2;
+
+ r0 = _mm_add_epi32(r[0].v, s[0].v);
+ r1 = _mm_add_epi32(r[1].v, s[1].v);
+ r2 = _mm_add_epi32(r[2].v, s[2].v);
+ r3 = _mm_add_epi32(r[3].v, s[3].v);
+ r4 = _mm_add_epi32(r[4].v, s[4].v);
+ /* regroup limb pairs for carrying */
+ s0 = _mm_unpacklo_epi64(r0, r2); /* 00 44 */
+ s1 = _mm_unpackhi_epi64(r0, r2); /* 11 55 */
+ s2 = _mm_unpacklo_epi64(r1, r3); /* 22 66 */
+ s3 = _mm_unpackhi_epi64(r1, r3); /* 33 77 */
+ s4 = _mm_unpacklo_epi64(_mm_setzero_si128(), r4); /* 00 88 */
+ s5 = _mm_unpackhi_epi64(_mm_setzero_si128(), r4); /* 00 99 */
+ /* carry chain; the (c2<<4)+(c2<<1)+c2 term below is 19*c2, the limb-9 carry wrapped into limb 0 */
+ c1 = _mm_srli_epi32(s0, 26); c2 = _mm_srli_epi32(s2, 26); s0 = _mm_and_si128(s0, packedmask26262626.v); s2 = _mm_and_si128(s2, packedmask26262626.v); s1 = _mm_add_epi32(s1, c1); s3 = _mm_add_epi32(s3, c2);
+ c1 = _mm_srli_epi32(s1, 25); c2 = _mm_srli_epi32(s3, 25); s1 = _mm_and_si128(s1, packedmask25252525.v); s3 = _mm_and_si128(s3, packedmask25252525.v); s2 = _mm_add_epi32(s2, c1); s4 = _mm_add_epi32(s4, _mm_unpackhi_epi64(_mm_setzero_si128(), c2)); s0 = _mm_add_epi32(s0, _mm_unpacklo_epi64(_mm_setzero_si128(), c2));
+ c1 = _mm_srli_epi32(s2, 26); c2 = _mm_srli_epi32(s4, 26); s2 = _mm_and_si128(s2, packedmask26262626.v); s4 = _mm_and_si128(s4, packedmask26262626.v); s3 = _mm_add_epi32(s3, c1); s5 = _mm_add_epi32(s5, c2);
+ c1 = _mm_srli_epi32(s3, 25); c2 = _mm_srli_epi32(s5, 25); s3 = _mm_and_si128(s3, packedmask25252525.v); s5 = _mm_and_si128(s5, packedmask25252525.v); s4 = _mm_add_epi32(s4, c1); s0 = _mm_add_epi32(s0, _mm_or_si128(_mm_slli_si128(c1, 8), _mm_srli_si128(_mm_add_epi32(_mm_add_epi32(_mm_slli_epi32(c2, 4), _mm_slli_epi32(c2, 1)), c2), 8)));
+ c1 = _mm_srli_epi32(s0, 26); c2 = _mm_srli_epi32(s2, 26); s0 = _mm_and_si128(s0, packedmask26262626.v); s2 = _mm_and_si128(s2, packedmask26262626.v); s1 = _mm_add_epi32(s1, c1); s3 = _mm_add_epi32(s3, c2);
+
+ out[0].v = _mm_unpacklo_epi64(s0, s1); /* 00 11 */
+ out[1].v = _mm_unpacklo_epi64(s2, s3); /* 22 33 */
+ out[2].v = _mm_unpackhi_epi64(s0, s1); /* 44 55 */
+ out[3].v = _mm_unpackhi_epi64(s2, s3); /* 66 77 */
+ out[4].v = _mm_unpackhi_epi64(s4, s5); /* 88 99 */
+}
+
+DONNA_INLINE static void
+curve25519_add_packed32(packedelem32 *out, const packedelem32 *r, const packedelem32 *s) { /* componentwise add; no carry reduction */
+ out[0].v = _mm_add_epi32(r[0].v, s[0].v);
+ out[1].v = _mm_add_epi32(r[1].v, s[1].v);
+ out[2].v = _mm_add_epi32(r[2].v, s[2].v);
+ out[3].v = _mm_add_epi32(r[3].v, s[3].v);
+ out[4].v = _mm_add_epi32(r[4].v, s[4].v);
+}
+
+DONNA_INLINE static void
+curve25519_sub_packed32(packedelem32 *out, const packedelem32 *r, const packedelem32 *s) { /* out = r - s; r is pre-biased (packed2p constants) so limbs cannot go negative */
+ xmmi r0,r1,r2,r3,r4;
+ xmmi s0,s1,s2,s3;
+ xmmi c1,c2;
+
+ r0 = _mm_add_epi32(r[0].v, packed32packed2p0.v);
+ r1 = _mm_add_epi32(r[1].v, packed32packed2p1.v);
+ r2 = _mm_add_epi32(r[2].v, packed32packed2p1.v);
+ r3 = _mm_add_epi32(r[3].v, packed32packed2p1.v);
+ r4 = _mm_add_epi32(r[4].v, packed32packed2p1.v);
+ r0 = _mm_sub_epi32(r0, s[0].v); /* 00 11 */
+ r1 = _mm_sub_epi32(r1, s[1].v); /* 22 33 */
+ r2 = _mm_sub_epi32(r2, s[2].v); /* 44 55 */
+ r3 = _mm_sub_epi32(r3, s[3].v); /* 66 77 */
+ r4 = _mm_sub_epi32(r4, s[4].v); /* 88 99 */
+
+ s0 = _mm_unpacklo_epi64(r0, r2); /* 00 44 */
+ s1 = _mm_unpackhi_epi64(r0, r2); /* 11 55 */
+ s2 = _mm_unpacklo_epi64(r1, r3); /* 22 66 */
+ s3 = _mm_unpackhi_epi64(r1, r3); /* 33 77 */
+ /* single partial carry pass; limbs 8/9 (r4) are left unreduced */
+ c1 = _mm_srli_epi32(s0, 26); c2 = _mm_srli_epi32(s2, 26); s0 = _mm_and_si128(s0, packedmask26262626.v); s2 = _mm_and_si128(s2, packedmask26262626.v); s1 = _mm_add_epi32(s1, c1); s3 = _mm_add_epi32(s3, c2);
+ c1 = _mm_srli_epi32(s1, 25); c2 = _mm_srli_epi32(s3, 25); s1 = _mm_and_si128(s1, packedmask25252525.v); s3 = _mm_and_si128(s3, packedmask25252525.v); s2 = _mm_add_epi32(s2, c1); r4 = _mm_add_epi32(r4, _mm_srli_si128(c2, 8)); s0 = _mm_add_epi32(s0, _mm_slli_si128(c2, 8));
+
+ out[0].v = _mm_unpacklo_epi64(s0, s1); /* 00 11 */
+ out[1].v = _mm_unpacklo_epi64(s2, s3); /* 22 33 */
+ out[2].v = _mm_unpackhi_epi64(s0, s1); /* 44 55 */
+ out[3].v = _mm_unpackhi_epi64(s2, s3); /* 66 77 */
+ out[4].v = r4;
+}
+
+DONNA_INLINE static void
+curve25519_sub_after_basic_packed32(packedelem32 *out, const packedelem32 *r, const packedelem32 *s) { /* out = r - s with a larger bias (packed4p constants, for unreduced inputs) and a full carry reduction */
+ xmmi r0,r1,r2,r3,r4;
+ xmmi s0,s1,s2,s3,s4,s5;
+ xmmi c1,c2;
+
+ r0 = _mm_add_epi32(r[0].v, packed32packed4p0.v);
+ r1 = _mm_add_epi32(r[1].v, packed32packed4p1.v);
+ r2 = _mm_add_epi32(r[2].v, packed32packed4p1.v);
+ r3 = _mm_add_epi32(r[3].v, packed32packed4p1.v);
+ r4 = _mm_add_epi32(r[4].v, packed32packed4p1.v);
+ r0 = _mm_sub_epi32(r0, s[0].v); /* 00 11 */
+ r1 = _mm_sub_epi32(r1, s[1].v); /* 22 33 */
+ r2 = _mm_sub_epi32(r2, s[2].v); /* 44 55 */
+ r3 = _mm_sub_epi32(r3, s[3].v); /* 66 77 */
+ r4 = _mm_sub_epi32(r4, s[4].v); /* 88 99 */
+
+ s0 = _mm_unpacklo_epi64(r0, r2); /* 00 44 */
+ s1 = _mm_unpackhi_epi64(r0, r2); /* 11 55 */
+ s2 = _mm_unpacklo_epi64(r1, r3); /* 22 66 */
+ s3 = _mm_unpackhi_epi64(r1, r3); /* 33 77 */
+ s4 = _mm_unpacklo_epi64(_mm_setzero_si128(), r4); /* 00 88 */
+ s5 = _mm_unpackhi_epi64(_mm_setzero_si128(), r4); /* 00 99 */
+ /* full carry chain; (c2<<4)+(c2<<1)+c2 = 19*c2 wraps the limb-9 carry into limb 0 */
+ c1 = _mm_srli_epi32(s0, 26); c2 = _mm_srli_epi32(s2, 26); s0 = _mm_and_si128(s0, packedmask26262626.v); s2 = _mm_and_si128(s2, packedmask26262626.v); s1 = _mm_add_epi32(s1, c1); s3 = _mm_add_epi32(s3, c2);
+ c1 = _mm_srli_epi32(s1, 25); c2 = _mm_srli_epi32(s3, 25); s1 = _mm_and_si128(s1, packedmask25252525.v); s3 = _mm_and_si128(s3, packedmask25252525.v); s2 = _mm_add_epi32(s2, c1); s4 = _mm_add_epi32(s4, _mm_unpackhi_epi64(_mm_setzero_si128(), c2)); s0 = _mm_add_epi32(s0, _mm_unpacklo_epi64(_mm_setzero_si128(), c2));
+ c1 = _mm_srli_epi32(s2, 26); c2 = _mm_srli_epi32(s4, 26); s2 = _mm_and_si128(s2, packedmask26262626.v); s4 = _mm_and_si128(s4, packedmask26262626.v); s3 = _mm_add_epi32(s3, c1); s5 = _mm_add_epi32(s5, c2);
+ c1 = _mm_srli_epi32(s3, 25); c2 = _mm_srli_epi32(s5, 25); s3 = _mm_and_si128(s3, packedmask25252525.v); s5 = _mm_and_si128(s5, packedmask25252525.v); s4 = _mm_add_epi32(s4, c1); s0 = _mm_add_epi32(s0, _mm_or_si128(_mm_slli_si128(c1, 8), _mm_srli_si128(_mm_add_epi32(_mm_add_epi32(_mm_slli_epi32(c2, 4), _mm_slli_epi32(c2, 1)), c2), 8)));
+ c1 = _mm_srli_epi32(s0, 26); c2 = _mm_srli_epi32(s2, 26); s0 = _mm_and_si128(s0, packedmask26262626.v); s2 = _mm_and_si128(s2, packedmask26262626.v); s1 = _mm_add_epi32(s1, c1); s3 = _mm_add_epi32(s3, c2);
+
+ out[0].v = _mm_unpacklo_epi64(s0, s1); /* 00 11 */
+ out[1].v = _mm_unpacklo_epi64(s2, s3); /* 22 33 */
+ out[2].v = _mm_unpackhi_epi64(s0, s1); /* 44 55 */
+ out[3].v = _mm_unpackhi_epi64(s2, s3); /* 66 77 */
+ out[4].v = _mm_unpackhi_epi64(s4, s5); /* 88 99 */
+}
+
+DONNA_INLINE static void
+curve25519_tangle64_from32(packedelem64 *a, packedelem64 *b, const packedelem32 *c, const packedelem32 *d) { /* repack two tangled 32-bit elements (c, d) into 64-bit packed form: a takes the first half of each pair, b the second */
+ xmmi c0,c1,c2,c3,c4,c5,t;
+ xmmi d0,d1,d2,d3,d4,d5;
+ xmmi t0,t1,t2,t3,t4,zero;
+ /* limbs 0-3 */
+ t0 = _mm_shuffle_epi32(c[0].v, _MM_SHUFFLE(3,1,2,0));
+ t1 = _mm_shuffle_epi32(c[1].v, _MM_SHUFFLE(3,1,2,0));
+ t2 = _mm_shuffle_epi32(d[0].v, _MM_SHUFFLE(3,1,2,0));
+ t3 = _mm_shuffle_epi32(d[1].v, _MM_SHUFFLE(3,1,2,0));
+ c0 = _mm_unpacklo_epi64(t0, t1);
+ c3 = _mm_unpackhi_epi64(t0, t1);
+ d0 = _mm_unpacklo_epi64(t2, t3);
+ d3 = _mm_unpackhi_epi64(t2, t3);
+ t = _mm_unpacklo_epi64(c0, d0); a[0].v = t; a[1].v = _mm_srli_epi64(t, 32);
+ t = _mm_unpackhi_epi64(c0, d0); a[2].v = t; a[3].v = _mm_srli_epi64(t, 32);
+ t = _mm_unpacklo_epi64(c3, d3); b[0].v = t; b[1].v = _mm_srli_epi64(t, 32);
+ t = _mm_unpackhi_epi64(c3, d3); b[2].v = t; b[3].v = _mm_srli_epi64(t, 32);
+ /* limbs 4-7 */
+ t0 = _mm_shuffle_epi32(c[2].v, _MM_SHUFFLE(3,1,2,0));
+ t1 = _mm_shuffle_epi32(c[3].v, _MM_SHUFFLE(3,1,2,0));
+ t2 = _mm_shuffle_epi32(d[2].v, _MM_SHUFFLE(3,1,2,0));
+ t3 = _mm_shuffle_epi32(d[3].v, _MM_SHUFFLE(3,1,2,0));
+ c1 = _mm_unpacklo_epi64(t0, t1);
+ c4 = _mm_unpackhi_epi64(t0, t1);
+ d1 = _mm_unpacklo_epi64(t2, t3);
+ d4 = _mm_unpackhi_epi64(t2, t3);
+ t = _mm_unpacklo_epi64(c1, d1); a[4].v = t; a[5].v = _mm_srli_epi64(t, 32);
+ t = _mm_unpackhi_epi64(c1, d1); a[6].v = t; a[7].v = _mm_srli_epi64(t, 32);
+ t = _mm_unpacklo_epi64(c4, d4); b[4].v = t; b[5].v = _mm_srli_epi64(t, 32);
+ t = _mm_unpackhi_epi64(c4, d4); b[6].v = t; b[7].v = _mm_srli_epi64(t, 32);
+ /* limbs 8-9 (upper halves padded with zero) */
+ t4 = _mm_shuffle_epi32(c[4].v, _MM_SHUFFLE(3,1,2,0));
+ zero = _mm_setzero_si128();
+ c2 = _mm_unpacklo_epi64(t4, zero);
+ c5 = _mm_unpackhi_epi64(t4, zero);
+ t4 = _mm_shuffle_epi32(d[4].v, _MM_SHUFFLE(3,1,2,0));
+ d2 = _mm_unpacklo_epi64(t4, zero);
+ d5 = _mm_unpackhi_epi64(t4, zero);
+ t = _mm_unpacklo_epi64(c2, d2); a[8].v = t; a[9].v = _mm_srli_epi64(t, 32);
+ t = _mm_unpacklo_epi64(c5, d5); b[8].v = t; b[9].v = _mm_srli_epi64(t, 32);
+}
+
+DONNA_INLINE static void
+curve25519_tangle64(packedelem64 *out, const bignum25519 x, const bignum25519 z) { /* out[i]: limb i of x in the low 64-bit lane, limb i of z in the high lane */
+ xmmi x0,x1,x2,z0,z1,z2,t;
+
+ x0 = _mm_load_si128((xmmi *)x + 0);
+ x1 = _mm_load_si128((xmmi *)x + 1);
+ x2 = _mm_load_si128((xmmi *)x + 2);
+ z0 = _mm_load_si128((xmmi *)z + 0);
+ z1 = _mm_load_si128((xmmi *)z + 1);
+ z2 = _mm_load_si128((xmmi *)z + 2);
+ /* even limbs directly, odd limbs via a 32-bit right shift of the same register */
+ t = _mm_unpacklo_epi64(x0, z0); out[0].v = t; out[1].v = _mm_srli_epi64(t, 32);
+ t = _mm_unpackhi_epi64(x0, z0); out[2].v = t; out[3].v = _mm_srli_epi64(t, 32);
+ t = _mm_unpacklo_epi64(x1, z1); out[4].v = t; out[5].v = _mm_srli_epi64(t, 32);
+ t = _mm_unpackhi_epi64(x1, z1); out[6].v = t; out[7].v = _mm_srli_epi64(t, 32);
+ t = _mm_unpacklo_epi64(x2, z2); out[8].v = t; out[9].v = _mm_srli_epi64(t, 32);
+}
+
+DONNA_INLINE static void
+curve25519_tangleone64(packedelem64 *out, const bignum25519 x) { /* broadcast: every 32-bit slot of out[i] holds limb i of x (same value in both lanes) */
+ xmmi x0,x1,x2;
+
+ x0 = _mm_load_si128((xmmi *)(x + 0));
+ x1 = _mm_load_si128((xmmi *)(x + 4));
+ x2 = _mm_load_si128((xmmi *)(x + 8));
+
+ out[0].v = _mm_shuffle_epi32(x0, _MM_SHUFFLE(0,0,0,0));
+ out[1].v = _mm_shuffle_epi32(x0, _MM_SHUFFLE(1,1,1,1));
+ out[2].v = _mm_shuffle_epi32(x0, _MM_SHUFFLE(2,2,2,2));
+ out[3].v = _mm_shuffle_epi32(x0, _MM_SHUFFLE(3,3,3,3));
+ out[4].v = _mm_shuffle_epi32(x1, _MM_SHUFFLE(0,0,0,0));
+ out[5].v = _mm_shuffle_epi32(x1, _MM_SHUFFLE(1,1,1,1));
+ out[6].v = _mm_shuffle_epi32(x1, _MM_SHUFFLE(2,2,2,2));
+ out[7].v = _mm_shuffle_epi32(x1, _MM_SHUFFLE(3,3,3,3));
+ out[8].v = _mm_shuffle_epi32(x2, _MM_SHUFFLE(0,0,0,0));
+ out[9].v = _mm_shuffle_epi32(x2, _MM_SHUFFLE(1,1,1,1));
+}
+
+DONNA_INLINE static void
+curve25519_swap64(packedelem64 *out) { /* exchange the two 64-bit lanes of every limb, i.e. swap the x and z halves in place */
+ out[0].v = _mm_shuffle_epi32(out[0].v, _MM_SHUFFLE(1,0,3,2));
+ out[1].v = _mm_shuffle_epi32(out[1].v, _MM_SHUFFLE(1,0,3,2));
+ out[2].v = _mm_shuffle_epi32(out[2].v, _MM_SHUFFLE(1,0,3,2));
+ out[3].v = _mm_shuffle_epi32(out[3].v, _MM_SHUFFLE(1,0,3,2));
+ out[4].v = _mm_shuffle_epi32(out[4].v, _MM_SHUFFLE(1,0,3,2));
+ out[5].v = _mm_shuffle_epi32(out[5].v, _MM_SHUFFLE(1,0,3,2));
+ out[6].v = _mm_shuffle_epi32(out[6].v, _MM_SHUFFLE(1,0,3,2));
+ out[7].v = _mm_shuffle_epi32(out[7].v, _MM_SHUFFLE(1,0,3,2));
+ out[8].v = _mm_shuffle_epi32(out[8].v, _MM_SHUFFLE(1,0,3,2));
+ out[9].v = _mm_shuffle_epi32(out[9].v, _MM_SHUFFLE(1,0,3,2));
+}
+
+DONNA_INLINE static void
+curve25519_untangle64(bignum25519 x, bignum25519 z, const packedelem64 *in) { /* inverse of curve25519_tangle64: recover x from the low 64-bit lanes, z from the high lanes */
+ _mm_store_si128((xmmi *)(x + 0), _mm_unpacklo_epi64(_mm_unpacklo_epi32(in[0].v, in[1].v), _mm_unpacklo_epi32(in[2].v, in[3].v)));
+ _mm_store_si128((xmmi *)(x + 4), _mm_unpacklo_epi64(_mm_unpacklo_epi32(in[4].v, in[5].v), _mm_unpacklo_epi32(in[6].v, in[7].v)));
+ _mm_store_si128((xmmi *)(x + 8), _mm_unpacklo_epi32(in[8].v, in[9].v) );
+ _mm_store_si128((xmmi *)(z + 0), _mm_unpacklo_epi64(_mm_unpackhi_epi32(in[0].v, in[1].v), _mm_unpackhi_epi32(in[2].v, in[3].v)));
+ _mm_store_si128((xmmi *)(z + 4), _mm_unpacklo_epi64(_mm_unpackhi_epi32(in[4].v, in[5].v), _mm_unpackhi_epi32(in[6].v, in[7].v)));
+ _mm_store_si128((xmmi *)(z + 8), _mm_unpackhi_epi32(in[8].v, in[9].v) );
+}
+
+DONNA_INLINE static void
+curve25519_mul_packed64(packedelem64 *out, const packedelem64 *r, const packedelem64 *s) { /* lane-parallel multiply: each 64-bit lane of out gets the product of the corresponding lanes of r and s, carry-reduced */
+ xmmi r1,r2,r3,r4,r5,r6,r7,r8,r9;
+ xmmi r1_2,r3_2,r5_2,r7_2,r9_2;
+ xmmi c1,c2;
+ /* schoolbook products for limbs 0-9 (odd*odd limb pairs use the pre-doubled r*_2 values) */
+ out[0].v = _mm_mul_epu32(r[0].v, s[0].v);
+ out[1].v = _mm_add_epi64(_mm_mul_epu32(r[0].v, s[1].v), _mm_mul_epu32(r[1].v, s[0].v));
+ r1_2 = _mm_slli_epi32(r[1].v, 1);
+ out[2].v = _mm_add_epi64(_mm_mul_epu32(r[0].v, s[2].v), _mm_add_epi64(_mm_mul_epu32(r1_2 , s[1].v), _mm_mul_epu32(r[2].v, s[0].v)));
+ out[3].v = _mm_add_epi64(_mm_mul_epu32(r[0].v, s[3].v), _mm_add_epi64(_mm_mul_epu32(r[1].v, s[2].v), _mm_add_epi64(_mm_mul_epu32(r[2].v, s[1].v), _mm_mul_epu32(r[3].v, s[0].v))));
+ r3_2 = _mm_slli_epi32(r[3].v, 1);
+ out[4].v = _mm_add_epi64(_mm_mul_epu32(r[0].v, s[4].v), _mm_add_epi64(_mm_mul_epu32(r1_2 , s[3].v), _mm_add_epi64(_mm_mul_epu32(r[2].v, s[2].v), _mm_add_epi64(_mm_mul_epu32(r3_2 , s[1].v), _mm_mul_epu32(r[4].v, s[0].v)))));
+ out[5].v = _mm_add_epi64(_mm_mul_epu32(r[0].v, s[5].v), _mm_add_epi64(_mm_mul_epu32(r[1].v, s[4].v), _mm_add_epi64(_mm_mul_epu32(r[2].v, s[3].v), _mm_add_epi64(_mm_mul_epu32(r[3].v, s[2].v), _mm_add_epi64(_mm_mul_epu32(r[4].v, s[1].v), _mm_mul_epu32(r[5].v, s[0].v))))));
+ out[6].v = _mm_add_epi64(_mm_mul_epu32(r[0].v, s[6].v), _mm_add_epi64(_mm_mul_epu32(r1_2 , s[5].v), _mm_add_epi64(_mm_mul_epu32(r[2].v, s[4].v), _mm_add_epi64(_mm_mul_epu32(r3_2 , s[3].v), _mm_add_epi64(_mm_mul_epu32(r[4].v, s[2].v), _mm_add_epi64(_mm_mul_epu32(r5_2 , s[1].v), _mm_mul_epu32(r[6].v, s[0].v)))))));
+ out[7].v = _mm_add_epi64(_mm_mul_epu32(r[0].v, s[7].v), _mm_add_epi64(_mm_mul_epu32(r[1].v, s[6].v), _mm_add_epi64(_mm_mul_epu32(r[2].v, s[5].v), _mm_add_epi64(_mm_mul_epu32(r[3].v, s[4].v), _mm_add_epi64(_mm_mul_epu32(r[4].v, s[3].v), _mm_add_epi64(_mm_mul_epu32(r[5].v, s[2].v), _mm_add_epi64(_mm_mul_epu32(r[6].v, s[1].v), _mm_mul_epu32(r[7].v , s[0].v))))))));
+ r7_2 = _mm_slli_epi32(r[7].v, 1);
+ out[8].v = _mm_add_epi64(_mm_mul_epu32(r[0].v, s[8].v), _mm_add_epi64(_mm_mul_epu32(r1_2 , s[7].v), _mm_add_epi64(_mm_mul_epu32(r[2].v, s[6].v), _mm_add_epi64(_mm_mul_epu32(r3_2 , s[5].v), _mm_add_epi64(_mm_mul_epu32(r[4].v, s[4].v), _mm_add_epi64(_mm_mul_epu32(r5_2 , s[3].v), _mm_add_epi64(_mm_mul_epu32(r[6].v, s[2].v), _mm_add_epi64(_mm_mul_epu32(r7_2 , s[1].v), _mm_mul_epu32(r[8].v, s[0].v)))))))));
+ out[9].v = _mm_add_epi64(_mm_mul_epu32(r[0].v, s[9].v), _mm_add_epi64(_mm_mul_epu32(r[1].v, s[8].v), _mm_add_epi64(_mm_mul_epu32(r[2].v, s[7].v), _mm_add_epi64(_mm_mul_epu32(r[3].v, s[6].v), _mm_add_epi64(_mm_mul_epu32(r[4].v, s[5].v), _mm_add_epi64(_mm_mul_epu32(r[5].v, s[4].v), _mm_add_epi64(_mm_mul_epu32(r[6].v, s[3].v), _mm_add_epi64(_mm_mul_epu32(r[7].v, s[2].v), _mm_add_epi64(_mm_mul_epu32(r[8].v, s[1].v), _mm_mul_epu32(r[9].v, s[0].v))))))))));
+ /* 19*r_i factors for the fold-back terms (2^255 = 19 mod p) */
+ r1 = _mm_mul_epu32(r[1].v, packednineteen.v);
+ r2 = _mm_mul_epu32(r[2].v, packednineteen.v);
+ r1_2 = _mm_slli_epi32(r1, 1);
+ r3 = _mm_mul_epu32(r[3].v, packednineteen.v);
+ r4 = _mm_mul_epu32(r[4].v, packednineteen.v);
+ r3_2 = _mm_slli_epi32(r3, 1);
+ r5 = _mm_mul_epu32(r[5].v, packednineteen.v);
+ r6 = _mm_mul_epu32(r[6].v, packednineteen.v);
+ r5_2 = _mm_slli_epi32(r5, 1);
+ r7 = _mm_mul_epu32(r[7].v, packednineteen.v);
+ r8 = _mm_mul_epu32(r[8].v, packednineteen.v);
+ r7_2 = _mm_slli_epi32(r7, 1);
+ r9 = _mm_mul_epu32(r[9].v, packednineteen.v);
+ r9_2 = _mm_slli_epi32(r9, 1);
+
+ out[0].v = _mm_add_epi64(out[0].v, _mm_add_epi64(_mm_mul_epu32(r9_2, s[1].v), _mm_add_epi64(_mm_mul_epu32(r8, s[2].v), _mm_add_epi64(_mm_mul_epu32(r7_2, s[3].v), _mm_add_epi64(_mm_mul_epu32(r6, s[4].v), _mm_add_epi64(_mm_mul_epu32(r5_2, s[5].v), _mm_add_epi64(_mm_mul_epu32(r4, s[6].v), _mm_add_epi64(_mm_mul_epu32(r3_2, s[7].v), _mm_add_epi64(_mm_mul_epu32(r2, s[8].v), _mm_mul_epu32(r1_2, s[9].v))))))))));
+ out[1].v = _mm_add_epi64(out[1].v, _mm_add_epi64(_mm_mul_epu32(r9 , s[2].v), _mm_add_epi64(_mm_mul_epu32(r8, s[3].v), _mm_add_epi64(_mm_mul_epu32(r7 , s[4].v), _mm_add_epi64(_mm_mul_epu32(r6, s[5].v), _mm_add_epi64(_mm_mul_epu32(r5 , s[6].v), _mm_add_epi64(_mm_mul_epu32(r4, s[7].v), _mm_add_epi64(_mm_mul_epu32(r3 , s[8].v), _mm_mul_epu32(r2, s[9].v)))))))));
+ out[2].v = _mm_add_epi64(out[2].v, _mm_add_epi64(_mm_mul_epu32(r9_2, s[3].v), _mm_add_epi64(_mm_mul_epu32(r8, s[4].v), _mm_add_epi64(_mm_mul_epu32(r7_2, s[5].v), _mm_add_epi64(_mm_mul_epu32(r6, s[6].v), _mm_add_epi64(_mm_mul_epu32(r5_2, s[7].v), _mm_add_epi64(_mm_mul_epu32(r4, s[8].v), _mm_mul_epu32(r3_2, s[9].v))))))));
+ out[3].v = _mm_add_epi64(out[3].v, _mm_add_epi64(_mm_mul_epu32(r9 , s[4].v), _mm_add_epi64(_mm_mul_epu32(r8, s[5].v), _mm_add_epi64(_mm_mul_epu32(r7 , s[6].v), _mm_add_epi64(_mm_mul_epu32(r6, s[7].v), _mm_add_epi64(_mm_mul_epu32(r5 , s[8].v), _mm_mul_epu32(r4, s[9].v)))))));
+ out[4].v = _mm_add_epi64(out[4].v, _mm_add_epi64(_mm_mul_epu32(r9_2, s[5].v), _mm_add_epi64(_mm_mul_epu32(r8, s[6].v), _mm_add_epi64(_mm_mul_epu32(r7_2, s[7].v), _mm_add_epi64(_mm_mul_epu32(r6, s[8].v), _mm_mul_epu32(r5_2, s[9].v))))));
+ out[5].v = _mm_add_epi64(out[5].v, _mm_add_epi64(_mm_mul_epu32(r9 , s[6].v), _mm_add_epi64(_mm_mul_epu32(r8, s[7].v), _mm_add_epi64(_mm_mul_epu32(r7 , s[8].v), _mm_mul_epu32(r6, s[9].v)))));
+ out[6].v = _mm_add_epi64(out[6].v, _mm_add_epi64(_mm_mul_epu32(r9_2, s[7].v), _mm_add_epi64(_mm_mul_epu32(r8, s[8].v), _mm_mul_epu32(r7_2, s[9].v))));
+ out[7].v = _mm_add_epi64(out[7].v, _mm_add_epi64(_mm_mul_epu32(r9 , s[8].v), _mm_mul_epu32(r8, s[9].v)));
+ out[8].v = _mm_add_epi64(out[8].v, _mm_mul_epu32(r9_2, s[9].v));
+ /* carry chain; the limb-9 carry wraps into limb 0 times 19 */
+ c1 = _mm_srli_epi64(out[0].v, 26); c2 = _mm_srli_epi64(out[4].v, 26); out[0].v = _mm_and_si128(out[0].v, packedmask26.v); out[4].v = _mm_and_si128(out[4].v, packedmask26.v); out[1].v = _mm_add_epi64(out[1].v, c1); out[5].v = _mm_add_epi64(out[5].v, c2);
+ c1 = _mm_srli_epi64(out[1].v, 25); c2 = _mm_srli_epi64(out[5].v, 25); out[1].v = _mm_and_si128(out[1].v, packedmask25.v); out[5].v = _mm_and_si128(out[5].v, packedmask25.v); out[2].v = _mm_add_epi64(out[2].v, c1); out[6].v = _mm_add_epi64(out[6].v, c2);
+ c1 = _mm_srli_epi64(out[2].v, 26); c2 = _mm_srli_epi64(out[6].v, 26); out[2].v = _mm_and_si128(out[2].v, packedmask26.v); out[6].v = _mm_and_si128(out[6].v, packedmask26.v); out[3].v = _mm_add_epi64(out[3].v, c1); out[7].v = _mm_add_epi64(out[7].v, c2);
+ c1 = _mm_srli_epi64(out[3].v, 25); c2 = _mm_srli_epi64(out[7].v, 25); out[3].v = _mm_and_si128(out[3].v, packedmask25.v); out[7].v = _mm_and_si128(out[7].v, packedmask25.v); out[4].v = _mm_add_epi64(out[4].v, c1); out[8].v = _mm_add_epi64(out[8].v, c2);
+ c2 = _mm_srli_epi64(out[8].v, 26); out[8].v = _mm_and_si128(out[8].v, packedmask26.v); out[9].v = _mm_add_epi64(out[9].v, c2);
+ c2 = _mm_srli_epi64(out[9].v, 25); out[9].v = _mm_and_si128(out[9].v, packedmask25.v); out[0].v = _mm_add_epi64(out[0].v, _mm_mul_epu32(c2, packednineteen.v));
+ c1 = _mm_srli_epi64(out[0].v, 26); c2 = _mm_srli_epi64(out[4].v, 26); out[0].v = _mm_and_si128(out[0].v, packedmask26.v); out[4].v = _mm_and_si128(out[4].v, packedmask26.v); out[1].v = _mm_add_epi64(out[1].v, c1); out[5].v = _mm_add_epi64(out[5].v, c2);
+}
+
+/* out = r^2 for two field elements squared in parallel.
+ *
+ * Each packedelem64 lane pair holds the corresponding limb of two
+ * independent field elements in the 10-limb 26/25-bit representation,
+ * so one pass squares both elements at once.  Products of high limbs
+ * that overflow past 2^255 are folded back in scaled by 19 (or 38 when
+ * the cross term is also doubled) -- reduction mod 2^255-19 -- and two
+ * interleaved carry passes bring every limb back under 2^26 / 2^25.
+ * Straight-line code, no secret-dependent branches or loads, so it is
+ * constant time. */
+DONNA_INLINE static void
+curve25519_square_packed64(packedelem64 *out, const packedelem64 *r) {
+ xmmi r0,r1,r2,r3;
+ xmmi r1_2,r3_2,r4_2,r5_2,r6_2,r7_2;
+ xmmi d5,d6,d7,d8,d9;
+ xmmi c1,c2;
+
+ r0 = r[0].v;
+ r1 = r[1].v;
+ r2 = r[2].v;
+ r3 = r[3].v;
+
+ /* schoolbook square, low part: out[k] = sum of r[i]*r[j] with
+  * i + j == k; mixed terms are doubled by pre-shifting one factor
+  * left by 1 (the _mm_slli_epi32 calls below). */
+ out[0].v = _mm_mul_epu32(r0, r0);
+ r0 = _mm_slli_epi32(r0, 1);
+ out[1].v = _mm_mul_epu32(r0, r1);
+ r1_2 = _mm_slli_epi32(r1, 1);
+ out[2].v = _mm_add_epi64(_mm_mul_epu32(r0, r2 ), _mm_mul_epu32(r1, r1_2));
+ r1 = r1_2;
+ out[3].v = _mm_add_epi64(_mm_mul_epu32(r0, r3 ), _mm_mul_epu32(r1, r2 ));
+ r3_2 = _mm_slli_epi32(r3, 1);
+ out[4].v = _mm_add_epi64(_mm_mul_epu32(r0, r[4].v), _mm_add_epi64(_mm_mul_epu32(r1, r3_2 ), _mm_mul_epu32(r2, r2)));
+ r2 = _mm_slli_epi32(r2, 1);
+ out[5].v = _mm_add_epi64(_mm_mul_epu32(r0, r[5].v), _mm_add_epi64(_mm_mul_epu32(r1, r[4].v), _mm_mul_epu32(r2, r3)));
+ r5_2 = _mm_slli_epi32(r[5].v, 1);
+ out[6].v = _mm_add_epi64(_mm_mul_epu32(r0, r[6].v), _mm_add_epi64(_mm_mul_epu32(r1, r5_2 ), _mm_add_epi64(_mm_mul_epu32(r2, r[4].v), _mm_mul_epu32(r3, r3_2 ))));
+ r3 = r3_2;
+ out[7].v = _mm_add_epi64(_mm_mul_epu32(r0, r[7].v), _mm_add_epi64(_mm_mul_epu32(r1, r[6].v), _mm_add_epi64(_mm_mul_epu32(r2, r[5].v), _mm_mul_epu32(r3, r[4].v))));
+ r7_2 = _mm_slli_epi32(r[7].v, 1);
+ out[8].v = _mm_add_epi64(_mm_mul_epu32(r0, r[8].v), _mm_add_epi64(_mm_mul_epu32(r1, r7_2 ), _mm_add_epi64(_mm_mul_epu32(r2, r[6].v), _mm_add_epi64(_mm_mul_epu32(r3, r5_2 ), _mm_mul_epu32(r[4].v, r[4].v)))));
+ out[9].v = _mm_add_epi64(_mm_mul_epu32(r0, r[9].v), _mm_add_epi64(_mm_mul_epu32(r1, r[8].v), _mm_add_epi64(_mm_mul_epu32(r2, r[7].v), _mm_add_epi64(_mm_mul_epu32(r3, r[6].v), _mm_mul_epu32(r[4].v, r5_2 )))));
+
+ /* high part: limb products with i + j >= 10 wrap around mod 2^255-19,
+  * so limbs 5..9 are pre-scaled by 19 (38 when the term is doubled)
+  * before being folded into out[0..8]. */
+ d5 = _mm_mul_epu32(r[5].v, packedthirtyeight.v);
+ d6 = _mm_mul_epu32(r[6].v, packednineteen.v);
+ d7 = _mm_mul_epu32(r[7].v, packedthirtyeight.v);
+ d8 = _mm_mul_epu32(r[8].v, packednineteen.v);
+ d9 = _mm_mul_epu32(r[9].v, packedthirtyeight.v);
+
+ r4_2 = _mm_slli_epi32(r[4].v, 1);
+ r6_2 = _mm_slli_epi32(r[6].v, 1);
+ out[0].v = _mm_add_epi64(out[0].v, _mm_add_epi64(_mm_mul_epu32(d9, r1 ), _mm_add_epi64(_mm_mul_epu32(d8, r2 ), _mm_add_epi64(_mm_mul_epu32(d7, r3 ), _mm_add_epi64(_mm_mul_epu32(d6, r4_2), _mm_mul_epu32(d5, r[5].v))))));
+ out[1].v = _mm_add_epi64(out[1].v, _mm_add_epi64(_mm_mul_epu32(d9, _mm_srli_epi32(r2, 1)), _mm_add_epi64(_mm_mul_epu32(d8, r3 ), _mm_add_epi64(_mm_mul_epu32(d7, r[4].v), _mm_mul_epu32(d6, r5_2 )))));
+ out[2].v = _mm_add_epi64(out[2].v, _mm_add_epi64(_mm_mul_epu32(d9, r3 ), _mm_add_epi64(_mm_mul_epu32(d8, r4_2), _mm_add_epi64(_mm_mul_epu32(d7, r5_2 ), _mm_mul_epu32(d6, r[6].v)))));
+ out[3].v = _mm_add_epi64(out[3].v, _mm_add_epi64(_mm_mul_epu32(d9, r[4].v ), _mm_add_epi64(_mm_mul_epu32(d8, r5_2), _mm_mul_epu32(d7, r[6].v))));
+ out[4].v = _mm_add_epi64(out[4].v, _mm_add_epi64(_mm_mul_epu32(d9, r5_2 ), _mm_add_epi64(_mm_mul_epu32(d8, r6_2), _mm_mul_epu32(d7, r[7].v))));
+ out[5].v = _mm_add_epi64(out[5].v, _mm_add_epi64(_mm_mul_epu32(d9, r[6].v ), _mm_mul_epu32(d8, r7_2 )));
+ out[6].v = _mm_add_epi64(out[6].v, _mm_add_epi64(_mm_mul_epu32(d9, r7_2 ), _mm_mul_epu32(d8, r[8].v)));
+ out[7].v = _mm_add_epi64(out[7].v, _mm_mul_epu32(d9, r[8].v));
+ out[8].v = _mm_add_epi64(out[8].v, _mm_mul_epu32(d9, r[9].v));
+
+ /* carry chain, two limbs per step (even half in c1, odd half in c2);
+  * the limb-9 overflow re-enters limb 0 multiplied by 19, then one more
+  * pass settles limbs 0/4. */
+ c1 = _mm_srli_epi64(out[0].v, 26); c2 = _mm_srli_epi64(out[4].v, 26); out[0].v = _mm_and_si128(out[0].v, packedmask26.v); out[4].v = _mm_and_si128(out[4].v, packedmask26.v); out[1].v = _mm_add_epi64(out[1].v, c1); out[5].v = _mm_add_epi64(out[5].v, c2);
+ c1 = _mm_srli_epi64(out[1].v, 25); c2 = _mm_srli_epi64(out[5].v, 25); out[1].v = _mm_and_si128(out[1].v, packedmask25.v); out[5].v = _mm_and_si128(out[5].v, packedmask25.v); out[2].v = _mm_add_epi64(out[2].v, c1); out[6].v = _mm_add_epi64(out[6].v, c2);
+ c1 = _mm_srli_epi64(out[2].v, 26); c2 = _mm_srli_epi64(out[6].v, 26); out[2].v = _mm_and_si128(out[2].v, packedmask26.v); out[6].v = _mm_and_si128(out[6].v, packedmask26.v); out[3].v = _mm_add_epi64(out[3].v, c1); out[7].v = _mm_add_epi64(out[7].v, c2);
+ c1 = _mm_srli_epi64(out[3].v, 25); c2 = _mm_srli_epi64(out[7].v, 25); out[3].v = _mm_and_si128(out[3].v, packedmask25.v); out[7].v = _mm_and_si128(out[7].v, packedmask25.v); out[4].v = _mm_add_epi64(out[4].v, c1); out[8].v = _mm_add_epi64(out[8].v, c2);
+ c2 = _mm_srli_epi64(out[8].v, 26); out[8].v = _mm_and_si128(out[8].v, packedmask26.v); out[9].v = _mm_add_epi64(out[9].v, c2);
+ c2 = _mm_srli_epi64(out[9].v, 25); out[9].v = _mm_and_si128(out[9].v, packedmask25.v); out[0].v = _mm_add_epi64(out[0].v, _mm_mul_epu32(c2, packednineteen.v));
+ c1 = _mm_srli_epi64(out[0].v, 26); c2 = _mm_srli_epi64(out[4].v, 26); out[0].v = _mm_and_si128(out[0].v, packedmask26.v); out[4].v = _mm_and_si128(out[4].v, packedmask26.v); out[1].v = _mm_add_epi64(out[1].v, c1); out[5].v = _mm_add_epi64(out[5].v, c2);
+}
+
+
+/* Take a little-endian, 32-byte number and expand it into polynomial form:
+ * ten limbs alternating 26 and 25 bits wide (limb i holds bits
+ * [26*ceil(i/2)+25*floor(i/2) ...] of the input).  Limbs 10 and 11 are
+ * zeroed -- presumably padding so a bignum25519 fills exactly three
+ * 128-bit SSE vectors (the conditional-swap code below loads three xmmi
+ * per element); TODO confirm against the bignum25519 typedef.
+ *
+ * NOTE(review): the *(uint32_t *) casts assume a little-endian target
+ * that tolerates unaligned 32-bit loads (x86); on other platforms they
+ * are strict-aliasing/alignment territory -- verify this header is only
+ * built for x86 SSE2 configurations. */
+static void
+curve25519_expand(bignum25519 out, const unsigned char in[32]) {
+ uint32_t x0,x1,x2,x3,x4,x5,x6,x7;
+
+ x0 = *(uint32_t *)(in + 0);
+ x1 = *(uint32_t *)(in + 4);
+ x2 = *(uint32_t *)(in + 8);
+ x3 = *(uint32_t *)(in + 12);
+ x4 = *(uint32_t *)(in + 16);
+ x5 = *(uint32_t *)(in + 20);
+ x6 = *(uint32_t *)(in + 24);
+ x7 = *(uint32_t *)(in + 28);
+
+ /* splice adjacent 32-bit words so each shift window can straddle a
+  * word boundary; masks select 26 or 25 bits per limb. */
+ out[0] = ( x0 ) & 0x3ffffff;
+ out[1] = ((((uint64_t)x1 << 32) | x0) >> 26) & 0x1ffffff;
+ out[2] = ((((uint64_t)x2 << 32) | x1) >> 19) & 0x3ffffff;
+ out[3] = ((((uint64_t)x3 << 32) | x2) >> 13) & 0x1ffffff;
+ out[4] = (( x3) >> 6) & 0x3ffffff;
+ out[5] = ( x4 ) & 0x1ffffff;
+ out[6] = ((((uint64_t)x5 << 32) | x4) >> 25) & 0x3ffffff;
+ out[7] = ((((uint64_t)x6 << 32) | x5) >> 19) & 0x1ffffff;
+ out[8] = ((((uint64_t)x7 << 32) | x6) >> 12) & 0x3ffffff;
+ out[9] = (( x7) >> 6) & 0x1ffffff;
+ out[10] = 0;
+ out[11] = 0;
+}
+
+/* Take a fully reduced polynomial form number and contract it into a
+ * little-endian, 32-byte array.
+ *
+ * Works on a local copy f of the input.  Two full carry passes settle
+ * all limbs, then the standard "add 19, add 2^255-19 offset, carry"
+ * trick canonicalizes the value mod 2^255-19 without branching on
+ * secret data: after the offset addition, bit 25 of f[9] (discarded by
+ * the final mask) plays the role of the borrow.  Finally the 26/25-bit
+ * limbs are shifted into byte alignment and serialized.
+ */
+static void
+curve25519_contract(unsigned char out[32], const bignum25519 in) {
+ bignum25519 ALIGN(16) f;
+ curve25519_copy(f, in);
+
+ #define carry_pass() \
+ f[1] += f[0] >> 26; f[0] &= 0x3ffffff; \
+ f[2] += f[1] >> 25; f[1] &= 0x1ffffff; \
+ f[3] += f[2] >> 26; f[2] &= 0x3ffffff; \
+ f[4] += f[3] >> 25; f[3] &= 0x1ffffff; \
+ f[5] += f[4] >> 26; f[4] &= 0x3ffffff; \
+ f[6] += f[5] >> 25; f[5] &= 0x1ffffff; \
+ f[7] += f[6] >> 26; f[6] &= 0x3ffffff; \
+ f[8] += f[7] >> 25; f[7] &= 0x1ffffff; \
+ f[9] += f[8] >> 26; f[8] &= 0x3ffffff;
+
+ #define carry_pass_full() \
+ carry_pass() \
+ f[0] += 19 * (f[9] >> 25); f[9] &= 0x1ffffff;
+
+ #define carry_pass_final() \
+ carry_pass() \
+ f[9] &= 0x1ffffff;
+
+ carry_pass_full()
+ carry_pass_full()
+
+ /* now f is between 0 and 2^255-1, properly carried. */
+ /* case 1: between 0 and 2^255-20. case 2: between 2^255-19 and 2^255-1. */
+ f[0] += 19;
+ carry_pass_full()
+
+ /* now between 19 and 2^255-1 in both cases, and offset by 19. */
+ f[0] += (1 << 26) - 19;
+ f[1] += (1 << 25) - 1;
+ f[2] += (1 << 26) - 1;
+ f[3] += (1 << 25) - 1;
+ f[4] += (1 << 26) - 1;
+ f[5] += (1 << 25) - 1;
+ f[6] += (1 << 26) - 1;
+ f[7] += (1 << 25) - 1;
+ f[8] += (1 << 26) - 1;
+ f[9] += (1 << 25) - 1;
+
+ /* now between 2^255 and 2^256-20, and offset by 2^255. */
+ carry_pass_final()
+
+ /* fix: the original #undefs named carry_full/carry_final, which were
+  * never defined, so carry_pass_full/carry_pass_final leaked out of
+  * this function. */
+ #undef carry_pass
+ #undef carry_pass_full
+ #undef carry_pass_final
+
+ /* shift each limb into byte alignment; adjacent limbs overlap at byte
+  * boundaries, which the |= in F() below accounts for. */
+ f[1] <<= 2;
+ f[2] <<= 3;
+ f[3] <<= 5;
+ f[4] <<= 6;
+ f[6] <<= 1;
+ f[7] <<= 3;
+ f[8] <<= 4;
+ f[9] <<= 6;
+
+ #define F(i, s) \
+ out[s+0] |= (unsigned char )(f[i] & 0xff); \
+ out[s+1] = (unsigned char )((f[i] >> 8) & 0xff); \
+ out[s+2] = (unsigned char )((f[i] >> 16) & 0xff); \
+ out[s+3] = (unsigned char )((f[i] >> 24) & 0xff);
+
+ /* only bytes 0 and 16 are OR-merged from scratch; every other byte is
+  * fully overwritten by a later F(). */
+ out[0] = 0;
+ out[16] = 0;
+ F(0,0);
+ F(1,3);
+ F(2,6);
+ F(3,9);
+ F(4,12);
+ F(5,16);
+ F(6,19);
+ F(7,22);
+ F(8,25);
+ F(9,28);
+ #undef F
+}
+
+/* if (iswap) swap(a, b) -- constant time.
+ *
+ * iswap must be 0 or 1: the mask is built as -(int32_t)iswap broadcast
+ * to all four dwords, so any nonzero iswap other than 1 would not give
+ * an all-ones mask.  Uses the classic xor-swap-under-mask trick across
+ * the three 16-byte vectors of each element (aligned loads/stores, so
+ * bignum25519 must be 16-byte aligned).  No branches depend on iswap. */
+DONNA_INLINE static void
+curve25519_swap_conditional(bignum25519 a, bignum25519 b, uint32_t iswap) {
+ const uint32_t swap = (uint32_t)(-(int32_t)iswap);
+ xmmi a0,a1,a2,b0,b1,b2,x0,x1,x2;
+ xmmi mask = _mm_cvtsi32_si128(swap);
+ mask = _mm_shuffle_epi32(mask, 0);
+ a0 = _mm_load_si128((xmmi *)a + 0);
+ a1 = _mm_load_si128((xmmi *)a + 1);
+ b0 = _mm_load_si128((xmmi *)b + 0);
+ b1 = _mm_load_si128((xmmi *)b + 1);
+ /* b' = a ^ b; x = a ^ (b' & mask) selects a or b; a-out = x ^ b'
+  * recovers the other operand. */
+ b0 = _mm_xor_si128(a0, b0);
+ b1 = _mm_xor_si128(a1, b1);
+ x0 = _mm_and_si128(b0, mask);
+ x1 = _mm_and_si128(b1, mask);
+ x0 = _mm_xor_si128(x0, a0);
+ x1 = _mm_xor_si128(x1, a1);
+ a0 = _mm_xor_si128(x0, b0);
+ a1 = _mm_xor_si128(x1, b1);
+ _mm_store_si128((xmmi *)a + 0, x0);
+ _mm_store_si128((xmmi *)a + 1, x1);
+ _mm_store_si128((xmmi *)b + 0, a0);
+ _mm_store_si128((xmmi *)b + 1, a1);
+
+ /* third vector (limbs 8..11) handled the same way. */
+ a2 = _mm_load_si128((xmmi *)a + 2);
+ b2 = _mm_load_si128((xmmi *)b + 2);
+ b2 = _mm_xor_si128(a2, b2);
+ x2 = _mm_and_si128(b2, mask);
+ x2 = _mm_xor_si128(x2, a2);
+ a2 = _mm_xor_si128(x2, b2);
+ _mm_store_si128((xmmi *)b + 2, a2);
+ _mm_store_si128((xmmi *)a + 2, x2);
+}
+
+/* out = (flag) ? in : out -- constant time.
+ * (The comment originally read "out : in", but nb = flag - 1 is all-ones
+ * when flag == 0, which KEEPS out and discards in; flag == 1 copies in.)
+ *
+ * flag must be 0 or 1.  Selects across six 16-byte vectors (96 bytes)
+ * with andnot/and/or under a broadcast mask; aligned loads/stores, so
+ * both buffers must be 16-byte aligned.  No flag-dependent branches. */
+DONNA_INLINE static void
+curve25519_move_conditional_bytes(uint8_t out[96], const uint8_t in[96], uint32_t flag) {
+ xmmi a0,a1,a2,a3,a4,a5,b0,b1,b2,b3,b4,b5;
+ const uint32_t nb = flag - 1;
+ xmmi masknb = _mm_shuffle_epi32(_mm_cvtsi32_si128(nb),0);
+ a0 = _mm_load_si128((xmmi *)in + 0);
+ a1 = _mm_load_si128((xmmi *)in + 1);
+ a2 = _mm_load_si128((xmmi *)in + 2);
+ b0 = _mm_load_si128((xmmi *)out + 0);
+ b1 = _mm_load_si128((xmmi *)out + 1);
+ b2 = _mm_load_si128((xmmi *)out + 2);
+ a0 = _mm_andnot_si128(masknb, a0);
+ a1 = _mm_andnot_si128(masknb, a1);
+ a2 = _mm_andnot_si128(masknb, a2);
+ b0 = _mm_and_si128(masknb, b0);
+ b1 = _mm_and_si128(masknb, b1);
+ b2 = _mm_and_si128(masknb, b2);
+ a0 = _mm_or_si128(a0, b0);
+ a1 = _mm_or_si128(a1, b1);
+ a2 = _mm_or_si128(a2, b2);
+ _mm_store_si128((xmmi*)out + 0, a0);
+ _mm_store_si128((xmmi*)out + 1, a1);
+ _mm_store_si128((xmmi*)out + 2, a2);
+
+ /* second half of the 96-byte buffer, same select. */
+ a3 = _mm_load_si128((xmmi *)in + 3);
+ a4 = _mm_load_si128((xmmi *)in + 4);
+ a5 = _mm_load_si128((xmmi *)in + 5);
+ b3 = _mm_load_si128((xmmi *)out + 3);
+ b4 = _mm_load_si128((xmmi *)out + 4);
+ b5 = _mm_load_si128((xmmi *)out + 5);
+ a3 = _mm_andnot_si128(masknb, a3);
+ a4 = _mm_andnot_si128(masknb, a4);
+ a5 = _mm_andnot_si128(masknb, a5);
+ b3 = _mm_and_si128(masknb, b3);
+ b4 = _mm_and_si128(masknb, b4);
+ b5 = _mm_and_si128(masknb, b5);
+ a3 = _mm_or_si128(a3, b3);
+ a4 = _mm_or_si128(a4, b4);
+ a5 = _mm_or_si128(a5, b5);
+ _mm_store_si128((xmmi*)out + 3, a3);
+ _mm_store_si128((xmmi*)out + 4, a4);
+ _mm_store_si128((xmmi*)out + 5, a5);
+}
+
diff --git a/src/ext/ed25519/donna/ed25519-donna-32bit-sse2.h b/src/ext/ed25519/donna/ed25519-donna-32bit-sse2.h
new file mode 100644
index 0000000000..db04a13d3f
--- /dev/null
+++ b/src/ext/ed25519/donna/ed25519-donna-32bit-sse2.h
@@ -0,0 +1,513 @@
+#if defined(ED25519_GCC_32BIT_SSE_CHOOSE)
+
+#define HAVE_GE25519_SCALARMULT_BASE_CHOOSE_NIELS
+
+/* Constant-time lookup of a precomputed niels point for the fixed-base
+ * scalar multiply: conceptually t = sign(b) * table-row[pos][|b|], where
+ * |b| == 0 yields the neutral element (ysubx = xaddy = 1, t2d = 0) and
+ * each table row holds 8 entries of 96 bytes (hence &table[pos * 8]).
+ *
+ * To keep the access pattern independent of the secret digit b, ALL
+ * eight entries (plus the |b| == 0 case) are scanned; pcmpeqd builds an
+ * all-ones mask only for the matching index, which is ANDed with each
+ * entry and ORed into the accumulators.  When b is negative, ysubx and
+ * xaddy are exchanged (xor-swap under mask) and t2d is replaced by
+ * 2p - t2d -- a branch-free conditional negation that keeps limbs
+ * positive.  The 32-bit packed words from the table are repacked on
+ * store into the 10-limb 26/25-bit layout (12 uint32 slots, last two
+ * zeroed); t is laid out as ysubx at +0, xaddy at +48, t2d at +96. */
+DONNA_NOINLINE static void
+ge25519_scalarmult_base_choose_niels(ge25519_niels *t, const uint8_t table[256][96], uint32_t pos, signed char b) {
+ /* u = |b| and sign = (b < 0), both computed without branches. */
+ int32_t breg = (int32_t)b;
+ uint32_t sign = (uint32_t)breg >> 31;
+ uint32_t mask = ~(sign - 1);
+ uint32_t u = (breg + mask) ^ mask;
+
+ __asm__ __volatile__ (
+ /* ysubx+xaddy: accumulate ysubx in xmm0/xmm1 and xaddy in xmm2/xmm3
+  by masked scan over indices 0..8 (xmm6 = broadcast u). */
+ "movl %0, %%eax ;\n"
+ "movd %%eax, %%xmm6 ;\n"
+ "pshufd $0x00, %%xmm6, %%xmm6 ;\n"
+ "pxor %%xmm0, %%xmm0 ;\n"
+ "pxor %%xmm1, %%xmm1 ;\n"
+ "pxor %%xmm2, %%xmm2 ;\n"
+ "pxor %%xmm3, %%xmm3 ;\n"
+
+ /* 0 */
+ /* index 0 is the neutral element: ysubx = xaddy = 1 (low dword 1,
+  rest 0), no table load. */
+ "movl $0, %%eax ;\n"
+ "movd %%eax, %%xmm7 ;\n"
+ "pshufd $0x00, %%xmm7, %%xmm7 ;\n"
+ "pcmpeqd %%xmm6, %%xmm7 ;\n"
+ "movl $1, %%ecx ;\n"
+ "movd %%ecx, %%xmm4 ;\n"
+ "pxor %%xmm5, %%xmm5 ;\n"
+ "pand %%xmm7, %%xmm4 ;\n"
+ "pand %%xmm7, %%xmm5 ;\n"
+ "por %%xmm4, %%xmm0 ;\n"
+ "por %%xmm5, %%xmm1 ;\n"
+ "por %%xmm4, %%xmm2 ;\n"
+ "por %%xmm5, %%xmm3 ;\n"
+
+ /* 1 */
+ /* entries are 96 bytes apart: ysubx at +0/+16, xaddy at +32/+48. */
+ "movl $1, %%eax ;\n"
+ "movd %%eax, %%xmm7 ;\n"
+ "pshufd $0x00, %%xmm7, %%xmm7 ;\n"
+ "pcmpeqd %%xmm6, %%xmm7 ;\n"
+ "movdqa 0(%1), %%xmm4 ;\n"
+ "movdqa 16(%1), %%xmm5 ;\n"
+ "pand %%xmm7, %%xmm4 ;\n"
+ "pand %%xmm7, %%xmm5 ;\n"
+ "por %%xmm4, %%xmm0 ;\n"
+ "por %%xmm5, %%xmm1 ;\n"
+ "movdqa 32(%1), %%xmm4 ;\n"
+ "movdqa 48(%1), %%xmm5 ;\n"
+ "pand %%xmm7, %%xmm4 ;\n"
+ "pand %%xmm7, %%xmm5 ;\n"
+ "por %%xmm4, %%xmm2 ;\n"
+ "por %%xmm5, %%xmm3 ;\n"
+
+ /* 2 */
+ "movl $2, %%eax ;\n"
+ "movd %%eax, %%xmm7 ;\n"
+ "pshufd $0x00, %%xmm7, %%xmm7 ;\n"
+ "pcmpeqd %%xmm6, %%xmm7 ;\n"
+ "movdqa 96(%1), %%xmm4 ;\n"
+ "movdqa 112(%1), %%xmm5 ;\n"
+ "pand %%xmm7, %%xmm4 ;\n"
+ "pand %%xmm7, %%xmm5 ;\n"
+ "por %%xmm4, %%xmm0 ;\n"
+ "por %%xmm5, %%xmm1 ;\n"
+ "movdqa 128(%1), %%xmm4 ;\n"
+ "movdqa 144(%1), %%xmm5 ;\n"
+ "pand %%xmm7, %%xmm4 ;\n"
+ "pand %%xmm7, %%xmm5 ;\n"
+ "por %%xmm4, %%xmm2 ;\n"
+ "por %%xmm5, %%xmm3 ;\n"
+
+ /* 3 */
+ "movl $3, %%eax ;\n"
+ "movd %%eax, %%xmm7 ;\n"
+ "pshufd $0x00, %%xmm7, %%xmm7 ;\n"
+ "pcmpeqd %%xmm6, %%xmm7 ;\n"
+ "movdqa 192(%1), %%xmm4 ;\n"
+ "movdqa 208(%1), %%xmm5 ;\n"
+ "pand %%xmm7, %%xmm4 ;\n"
+ "pand %%xmm7, %%xmm5 ;\n"
+ "por %%xmm4, %%xmm0 ;\n"
+ "por %%xmm5, %%xmm1 ;\n"
+ "movdqa 224(%1), %%xmm4 ;\n"
+ "movdqa 240(%1), %%xmm5 ;\n"
+ "pand %%xmm7, %%xmm4 ;\n"
+ "pand %%xmm7, %%xmm5 ;\n"
+ "por %%xmm4, %%xmm2 ;\n"
+ "por %%xmm5, %%xmm3 ;\n"
+
+ /* 4 */
+ "movl $4, %%eax ;\n"
+ "movd %%eax, %%xmm7 ;\n"
+ "pshufd $0x00, %%xmm7, %%xmm7 ;\n"
+ "pcmpeqd %%xmm6, %%xmm7 ;\n"
+ "movdqa 288(%1), %%xmm4 ;\n"
+ "movdqa 304(%1), %%xmm5 ;\n"
+ "pand %%xmm7, %%xmm4 ;\n"
+ "pand %%xmm7, %%xmm5 ;\n"
+ "por %%xmm4, %%xmm0 ;\n"
+ "por %%xmm5, %%xmm1 ;\n"
+ "movdqa 320(%1), %%xmm4 ;\n"
+ "movdqa 336(%1), %%xmm5 ;\n"
+ "pand %%xmm7, %%xmm4 ;\n"
+ "pand %%xmm7, %%xmm5 ;\n"
+ "por %%xmm4, %%xmm2 ;\n"
+ "por %%xmm5, %%xmm3 ;\n"
+
+ /* 5 */
+ "movl $5, %%eax ;\n"
+ "movd %%eax, %%xmm7 ;\n"
+ "pshufd $0x00, %%xmm7, %%xmm7 ;\n"
+ "pcmpeqd %%xmm6, %%xmm7 ;\n"
+ "movdqa 384(%1), %%xmm4 ;\n"
+ "movdqa 400(%1), %%xmm5 ;\n"
+ "pand %%xmm7, %%xmm4 ;\n"
+ "pand %%xmm7, %%xmm5 ;\n"
+ "por %%xmm4, %%xmm0 ;\n"
+ "por %%xmm5, %%xmm1 ;\n"
+ "movdqa 416(%1), %%xmm4 ;\n"
+ "movdqa 432(%1), %%xmm5 ;\n"
+ "pand %%xmm7, %%xmm4 ;\n"
+ "pand %%xmm7, %%xmm5 ;\n"
+ "por %%xmm4, %%xmm2 ;\n"
+ "por %%xmm5, %%xmm3 ;\n"
+
+ /* 6 */
+ "movl $6, %%eax ;\n"
+ "movd %%eax, %%xmm7 ;\n"
+ "pshufd $0x00, %%xmm7, %%xmm7 ;\n"
+ "pcmpeqd %%xmm6, %%xmm7 ;\n"
+ "movdqa 480(%1), %%xmm4 ;\n"
+ "movdqa 496(%1), %%xmm5 ;\n"
+ "pand %%xmm7, %%xmm4 ;\n"
+ "pand %%xmm7, %%xmm5 ;\n"
+ "por %%xmm4, %%xmm0 ;\n"
+ "por %%xmm5, %%xmm1 ;\n"
+ "movdqa 512(%1), %%xmm4 ;\n"
+ "movdqa 528(%1), %%xmm5 ;\n"
+ "pand %%xmm7, %%xmm4 ;\n"
+ "pand %%xmm7, %%xmm5 ;\n"
+ "por %%xmm4, %%xmm2 ;\n"
+ "por %%xmm5, %%xmm3 ;\n"
+
+ /* 7 */
+ "movl $7, %%eax ;\n"
+ "movd %%eax, %%xmm7 ;\n"
+ "pshufd $0x00, %%xmm7, %%xmm7 ;\n"
+ "pcmpeqd %%xmm6, %%xmm7 ;\n"
+ "movdqa 576(%1), %%xmm4 ;\n"
+ "movdqa 592(%1), %%xmm5 ;\n"
+ "pand %%xmm7, %%xmm4 ;\n"
+ "pand %%xmm7, %%xmm5 ;\n"
+ "por %%xmm4, %%xmm0 ;\n"
+ "por %%xmm5, %%xmm1 ;\n"
+ "movdqa 608(%1), %%xmm4 ;\n"
+ "movdqa 624(%1), %%xmm5 ;\n"
+ "pand %%xmm7, %%xmm4 ;\n"
+ "pand %%xmm7, %%xmm5 ;\n"
+ "por %%xmm4, %%xmm2 ;\n"
+ "por %%xmm5, %%xmm3 ;\n"
+
+ /* 8 */
+ "movl $8, %%eax ;\n"
+ "movd %%eax, %%xmm7 ;\n"
+ "pshufd $0x00, %%xmm7, %%xmm7 ;\n"
+ "pcmpeqd %%xmm6, %%xmm7 ;\n"
+ "movdqa 672(%1), %%xmm4 ;\n"
+ "movdqa 688(%1), %%xmm5 ;\n"
+ "pand %%xmm7, %%xmm4 ;\n"
+ "pand %%xmm7, %%xmm5 ;\n"
+ "por %%xmm4, %%xmm0 ;\n"
+ "por %%xmm5, %%xmm1 ;\n"
+ "movdqa 704(%1), %%xmm4 ;\n"
+ "movdqa 720(%1), %%xmm5 ;\n"
+ "pand %%xmm7, %%xmm4 ;\n"
+ "pand %%xmm7, %%xmm5 ;\n"
+ "por %%xmm4, %%xmm2 ;\n"
+ "por %%xmm5, %%xmm3 ;\n"
+
+ /* conditional swap based on sign */
+ /* mask = all-ones iff sign == 1 (the xorl $1 / pcmpeqd-with-zero
+  combination); xor-swap exchanges ysubx and xaddy under it. */
+ "movl %3, %%ecx ;\n"
+ "movl %2, %%eax ;\n"
+ "xorl $1, %%ecx ;\n"
+ "movd %%ecx, %%xmm6 ;\n"
+ "pxor %%xmm7, %%xmm7 ;\n"
+ "pshufd $0x00, %%xmm6, %%xmm6 ;\n"
+ "pxor %%xmm0, %%xmm2 ;\n"
+ "pxor %%xmm1, %%xmm3 ;\n"
+ "pcmpeqd %%xmm6, %%xmm7 ;\n"
+ "movdqa %%xmm2, %%xmm4 ;\n"
+ "movdqa %%xmm3, %%xmm5 ;\n"
+ "pand %%xmm7, %%xmm4 ;\n"
+ "pand %%xmm7, %%xmm5 ;\n"
+ "pxor %%xmm4, %%xmm0 ;\n"
+ "pxor %%xmm5, %%xmm1 ;\n"
+ "pxor %%xmm0, %%xmm2 ;\n"
+ "pxor %%xmm1, %%xmm3 ;\n"
+
+ /* store ysubx */
+ /* repack eight packed 32-bit words into ten 26/25-bit limbs at
+  t+0; shrdl splices adjacent words across limb boundaries, and
+  limbs 10/11 (offsets 40/44) are zeroed. */
+ "movd %%xmm0, %%ecx ;\n"
+ "movl %%ecx, %%edx ;\n"
+ "pshufd $0x39, %%xmm0, %%xmm0 ;\n"
+ "andl $0x3ffffff, %%ecx ;\n"
+ "movl %%ecx, 0(%%eax) ;\n"
+ "movd %%xmm0, %%ecx ;\n"
+ "pshufd $0x39, %%xmm0, %%xmm0 ;\n"
+ "shrdl $26, %%ecx, %%edx ;\n"
+ "andl $0x1ffffff, %%edx ;\n"
+ "movl %%edx, 4(%%eax) ;\n"
+ "movd %%xmm0, %%edx ;\n"
+ "pshufd $0x39, %%xmm0, %%xmm0 ;\n"
+ "shrdl $19, %%edx, %%ecx ;\n"
+ "andl $0x3ffffff, %%ecx ;\n"
+ "movl %%ecx, 8(%%eax) ;\n"
+ "movd %%xmm0, %%ecx ;\n"
+ "shrdl $13, %%ecx, %%edx ;\n"
+ "andl $0x1ffffff, %%edx ;\n"
+ "movl %%edx, 12(%%eax) ;\n"
+ "movd %%xmm1, %%edx ;\n"
+ "pshufd $0x39, %%xmm1, %%xmm1 ;\n"
+ "shrl $6, %%ecx ;\n"
+ "andl $0x3ffffff, %%ecx ;\n"
+ "movl %%ecx, 16(%%eax) ;\n"
+ "movl %%edx, %%ecx ;\n"
+ "andl $0x1ffffff, %%edx ;\n"
+ "movl %%edx, 20(%%eax) ;\n"
+ "movd %%xmm1, %%edx ;\n"
+ "pshufd $0x39, %%xmm1, %%xmm1 ;\n"
+ "shrdl $25, %%edx, %%ecx ;\n"
+ "andl $0x3ffffff, %%ecx ;\n"
+ "movl %%ecx, 24(%%eax) ;\n"
+ "movd %%xmm1, %%ecx ;\n"
+ "pshufd $0x39, %%xmm1, %%xmm1 ;\n"
+ "shrdl $19, %%ecx, %%edx ;\n"
+ "andl $0x1ffffff, %%edx ;\n"
+ "movl %%edx, 28(%%eax) ;\n"
+ "movd %%xmm1, %%edx ;\n"
+ "shrdl $12, %%edx, %%ecx ;\n"
+ "andl $0x3ffffff, %%ecx ;\n"
+ "movl %%ecx, 32(%%eax) ;\n"
+ "shrl $6, %%edx ;\n"
+ "andl $0x1ffffff, %%edx ;\n"
+ "xorl %%ecx, %%ecx ;\n"
+ "movl %%edx, 36(%%eax) ;\n"
+ "movl %%ecx, 40(%%eax) ;\n"
+ "movl %%ecx, 44(%%eax) ;\n"
+
+ /* store xaddy */
+ /* same repacking for xaddy (xmm2/xmm3) at t+48. */
+ "addl $48, %%eax ;\n"
+ "movdqa %%xmm2, %%xmm0 ;\n"
+ "movdqa %%xmm3, %%xmm1 ;\n"
+ "movd %%xmm0, %%ecx ;\n"
+ "movl %%ecx, %%edx ;\n"
+ "pshufd $0x39, %%xmm0, %%xmm0 ;\n"
+ "andl $0x3ffffff, %%ecx ;\n"
+ "movl %%ecx, 0(%%eax) ;\n"
+ "movd %%xmm0, %%ecx ;\n"
+ "pshufd $0x39, %%xmm0, %%xmm0 ;\n"
+ "shrdl $26, %%ecx, %%edx ;\n"
+ "andl $0x1ffffff, %%edx ;\n"
+ "movl %%edx, 4(%%eax) ;\n"
+ "movd %%xmm0, %%edx ;\n"
+ "pshufd $0x39, %%xmm0, %%xmm0 ;\n"
+ "shrdl $19, %%edx, %%ecx ;\n"
+ "andl $0x3ffffff, %%ecx ;\n"
+ "movl %%ecx, 8(%%eax) ;\n"
+ "movd %%xmm0, %%ecx ;\n"
+ "shrdl $13, %%ecx, %%edx ;\n"
+ "andl $0x1ffffff, %%edx ;\n"
+ "movl %%edx, 12(%%eax) ;\n"
+ "movd %%xmm1, %%edx ;\n"
+ "pshufd $0x39, %%xmm1, %%xmm1 ;\n"
+ "shrl $6, %%ecx ;\n"
+ "andl $0x3ffffff, %%ecx ;\n"
+ "movl %%ecx, 16(%%eax) ;\n"
+ "movl %%edx, %%ecx ;\n"
+ "andl $0x1ffffff, %%edx ;\n"
+ "movl %%edx, 20(%%eax) ;\n"
+ "movd %%xmm1, %%edx ;\n"
+ "pshufd $0x39, %%xmm1, %%xmm1 ;\n"
+ "shrdl $25, %%edx, %%ecx ;\n"
+ "andl $0x3ffffff, %%ecx ;\n"
+ "movl %%ecx, 24(%%eax) ;\n"
+ "movd %%xmm1, %%ecx ;\n"
+ "pshufd $0x39, %%xmm1, %%xmm1 ;\n"
+ "shrdl $19, %%ecx, %%edx ;\n"
+ "andl $0x1ffffff, %%edx ;\n"
+ "movl %%edx, 28(%%eax) ;\n"
+ "movd %%xmm1, %%edx ;\n"
+ "shrdl $12, %%edx, %%ecx ;\n"
+ "andl $0x3ffffff, %%ecx ;\n"
+ "movl %%ecx, 32(%%eax) ;\n"
+ "shrl $6, %%edx ;\n"
+ "andl $0x1ffffff, %%edx ;\n"
+ "xorl %%ecx, %%ecx ;\n"
+ "movl %%edx, 36(%%eax) ;\n"
+ "movl %%ecx, 40(%%eax) ;\n"
+ "movl %%ecx, 44(%%eax) ;\n"
+
+ /* t2d */
+ /* second masked scan, this time accumulating t2d (at entry offset
+  +64/+80) into xmm0/xmm1. */
+ "movl %0, %%eax ;\n"
+ "movd %%eax, %%xmm6 ;\n"
+ "pshufd $0x00, %%xmm6, %%xmm6 ;\n"
+ "pxor %%xmm0, %%xmm0 ;\n"
+ "pxor %%xmm1, %%xmm1 ;\n"
+
+ /* 0 */
+ /* t2d of the neutral element is zero, so nothing is ORed in; the
+  pxor pair just (re)clears the accumulators. */
+ "movl $0, %%eax ;\n"
+ "movd %%eax, %%xmm7 ;\n"
+ "pshufd $0x00, %%xmm7, %%xmm7 ;\n"
+ "pcmpeqd %%xmm6, %%xmm7 ;\n"
+ "pxor %%xmm0, %%xmm0 ;\n"
+ "pxor %%xmm1, %%xmm1 ;\n"
+
+ /* 1 */
+ "movl $1, %%eax ;\n"
+ "movd %%eax, %%xmm7 ;\n"
+ "pshufd $0x00, %%xmm7, %%xmm7 ;\n"
+ "pcmpeqd %%xmm6, %%xmm7 ;\n"
+ "movdqa 64(%1), %%xmm3 ;\n"
+ "movdqa 80(%1), %%xmm4 ;\n"
+ "pand %%xmm7, %%xmm3 ;\n"
+ "pand %%xmm7, %%xmm4 ;\n"
+ "por %%xmm3, %%xmm0 ;\n"
+ "por %%xmm4, %%xmm1 ;\n"
+
+ /* 2 */
+ "movl $2, %%eax ;\n"
+ "movd %%eax, %%xmm7 ;\n"
+ "pshufd $0x00, %%xmm7, %%xmm7 ;\n"
+ "pcmpeqd %%xmm6, %%xmm7 ;\n"
+ "movdqa 160(%1), %%xmm3 ;\n"
+ "movdqa 176(%1), %%xmm4 ;\n"
+ "pand %%xmm7, %%xmm3 ;\n"
+ "pand %%xmm7, %%xmm4 ;\n"
+ "por %%xmm3, %%xmm0 ;\n"
+ "por %%xmm4, %%xmm1 ;\n"
+
+ /* 3 */
+ "movl $3, %%eax ;\n"
+ "movd %%eax, %%xmm7 ;\n"
+ "pshufd $0x00, %%xmm7, %%xmm7 ;\n"
+ "pcmpeqd %%xmm6, %%xmm7 ;\n"
+ "movdqa 256(%1), %%xmm3 ;\n"
+ "movdqa 272(%1), %%xmm4 ;\n"
+ "pand %%xmm7, %%xmm3 ;\n"
+ "pand %%xmm7, %%xmm4 ;\n"
+ "por %%xmm3, %%xmm0 ;\n"
+ "por %%xmm4, %%xmm1 ;\n"
+
+ /* 4 */
+ "movl $4, %%eax ;\n"
+ "movd %%eax, %%xmm7 ;\n"
+ "pshufd $0x00, %%xmm7, %%xmm7 ;\n"
+ "pcmpeqd %%xmm6, %%xmm7 ;\n"
+ "movdqa 352(%1), %%xmm3 ;\n"
+ "movdqa 368(%1), %%xmm4 ;\n"
+ "pand %%xmm7, %%xmm3 ;\n"
+ "pand %%xmm7, %%xmm4 ;\n"
+ "por %%xmm3, %%xmm0 ;\n"
+ "por %%xmm4, %%xmm1 ;\n"
+
+ /* 5 */
+ "movl $5, %%eax ;\n"
+ "movd %%eax, %%xmm7 ;\n"
+ "pshufd $0x00, %%xmm7, %%xmm7 ;\n"
+ "pcmpeqd %%xmm6, %%xmm7 ;\n"
+ "movdqa 448(%1), %%xmm3 ;\n"
+ "movdqa 464(%1), %%xmm4 ;\n"
+ "pand %%xmm7, %%xmm3 ;\n"
+ "pand %%xmm7, %%xmm4 ;\n"
+ "por %%xmm3, %%xmm0 ;\n"
+ "por %%xmm4, %%xmm1 ;\n"
+
+ /* 6 */
+ "movl $6, %%eax ;\n"
+ "movd %%eax, %%xmm7 ;\n"
+ "pshufd $0x00, %%xmm7, %%xmm7 ;\n"
+ "pcmpeqd %%xmm6, %%xmm7 ;\n"
+ "movdqa 544(%1), %%xmm3 ;\n"
+ "movdqa 560(%1), %%xmm4 ;\n"
+ "pand %%xmm7, %%xmm3 ;\n"
+ "pand %%xmm7, %%xmm4 ;\n"
+ "por %%xmm3, %%xmm0 ;\n"
+ "por %%xmm4, %%xmm1 ;\n"
+
+ /* 7 */
+ "movl $7, %%eax ;\n"
+ "movd %%eax, %%xmm7 ;\n"
+ "pshufd $0x00, %%xmm7, %%xmm7 ;\n"
+ "pcmpeqd %%xmm6, %%xmm7 ;\n"
+ "movdqa 640(%1), %%xmm3 ;\n"
+ "movdqa 656(%1), %%xmm4 ;\n"
+ "pand %%xmm7, %%xmm3 ;\n"
+ "pand %%xmm7, %%xmm4 ;\n"
+ "por %%xmm3, %%xmm0 ;\n"
+ "por %%xmm4, %%xmm1 ;\n"
+
+ /* 8 */
+ "movl $8, %%eax ;\n"
+ "movd %%eax, %%xmm7 ;\n"
+ "pshufd $0x00, %%xmm7, %%xmm7 ;\n"
+ "pcmpeqd %%xmm6, %%xmm7 ;\n"
+ "movdqa 736(%1), %%xmm3 ;\n"
+ "movdqa 752(%1), %%xmm4 ;\n"
+ "pand %%xmm7, %%xmm3 ;\n"
+ "pand %%xmm7, %%xmm4 ;\n"
+ "por %%xmm3, %%xmm0 ;\n"
+ "por %%xmm4, %%xmm1 ;\n"
+
+ /* store t2d */
+ /* repack t2d into limb form at t+96 (same pattern as above). */
+ "movl %2, %%eax ;\n"
+ "addl $96, %%eax ;\n"
+ "movd %%xmm0, %%ecx ;\n"
+ "movl %%ecx, %%edx ;\n"
+ "pshufd $0x39, %%xmm0, %%xmm0 ;\n"
+ "andl $0x3ffffff, %%ecx ;\n"
+ "movl %%ecx, 0(%%eax) ;\n"
+ "movd %%xmm0, %%ecx ;\n"
+ "pshufd $0x39, %%xmm0, %%xmm0 ;\n"
+ "shrdl $26, %%ecx, %%edx ;\n"
+ "andl $0x1ffffff, %%edx ;\n"
+ "movl %%edx, 4(%%eax) ;\n"
+ "movd %%xmm0, %%edx ;\n"
+ "pshufd $0x39, %%xmm0, %%xmm0 ;\n"
+ "shrdl $19, %%edx, %%ecx ;\n"
+ "andl $0x3ffffff, %%ecx ;\n"
+ "movl %%ecx, 8(%%eax) ;\n"
+ "movd %%xmm0, %%ecx ;\n"
+ "shrdl $13, %%ecx, %%edx ;\n"
+ "andl $0x1ffffff, %%edx ;\n"
+ "movl %%edx, 12(%%eax) ;\n"
+ "movd %%xmm1, %%edx ;\n"
+ "pshufd $0x39, %%xmm1, %%xmm1 ;\n"
+ "shrl $6, %%ecx ;\n"
+ "andl $0x3ffffff, %%ecx ;\n"
+ "movl %%ecx, 16(%%eax) ;\n"
+ "movl %%edx, %%ecx ;\n"
+ "andl $0x1ffffff, %%edx ;\n"
+ "movl %%edx, 20(%%eax) ;\n"
+ "movd %%xmm1, %%edx ;\n"
+ "pshufd $0x39, %%xmm1, %%xmm1 ;\n"
+ "shrdl $25, %%edx, %%ecx ;\n"
+ "andl $0x3ffffff, %%ecx ;\n"
+ "movl %%ecx, 24(%%eax) ;\n"
+ "movd %%xmm1, %%ecx ;\n"
+ "pshufd $0x39, %%xmm1, %%xmm1 ;\n"
+ "shrdl $19, %%ecx, %%edx ;\n"
+ "andl $0x1ffffff, %%edx ;\n"
+ "movl %%edx, 28(%%eax) ;\n"
+ /* NOTE(review): the movd below is duplicated (harmless -- it reloads
+  the same value into %edx twice); diverges from the store-ysubx /
+  store-xaddy pattern above, verify against upstream ed25519-donna. */
+ "movd %%xmm1, %%edx ;\n"
+ "movd %%xmm1, %%edx ;\n"
+ "shrdl $12, %%edx, %%ecx ;\n"
+ "andl $0x3ffffff, %%ecx ;\n"
+ "movl %%ecx, 32(%%eax) ;\n"
+ "shrl $6, %%edx ;\n"
+ "andl $0x1ffffff, %%edx ;\n"
+ "xorl %%ecx, %%ecx ;\n"
+ "movl %%edx, 36(%%eax) ;\n"
+ "movl %%ecx, 40(%%eax) ;\n"
+ "movl %%ecx, 44(%%eax) ;\n"
+ /* reload the just-stored t2d limbs for the conditional negation. */
+ "movdqa 0(%%eax), %%xmm0 ;\n"
+ "movdqa 16(%%eax), %%xmm1 ;\n"
+ "movdqa 32(%%eax), %%xmm2 ;\n"
+
+ /* conditionally negate t2d */
+
+ /* set up 2p in to 3/4 */
+ /* 2p in limb form (0x7ffffda, then alternating 0x3fffffe/0x7fffffe)
+  so the subtraction below stays non-negative per limb. */
+ "movl $0x7ffffda, %%ecx ;\n"
+ "movl $0x3fffffe, %%edx ;\n"
+ "movd %%ecx, %%xmm3 ;\n"
+ "movd %%edx, %%xmm5 ;\n"
+ "movl $0x7fffffe, %%ecx ;\n"
+ "movd %%ecx, %%xmm4 ;\n"
+ "punpckldq %%xmm5, %%xmm3 ;\n"
+ "punpckldq %%xmm5, %%xmm4 ;\n"
+ "punpcklqdq %%xmm4, %%xmm3 ;\n"
+ "movdqa %%xmm4, %%xmm5 ;\n"
+ "punpcklqdq %%xmm4, %%xmm4 ;\n"
+
+ /* subtract and conditionally move */
+ /* mask = sign - 1: all-ones keeps t2d as-is (b >= 0), zero selects
+  2p - t2d (b < 0). */
+ "movl %3, %%ecx ;\n"
+ "sub $1, %%ecx ;\n"
+ "movd %%ecx, %%xmm6 ;\n"
+ "pshufd $0x00, %%xmm6, %%xmm6 ;\n"
+ "movdqa %%xmm6, %%xmm7 ;\n"
+ "psubd %%xmm0, %%xmm3 ;\n"
+ "psubd %%xmm1, %%xmm4 ;\n"
+ "psubd %%xmm2, %%xmm5 ;\n"
+ "pand %%xmm6, %%xmm0 ;\n"
+ "pand %%xmm6, %%xmm1 ;\n"
+ "pand %%xmm6, %%xmm2 ;\n"
+ "pandn %%xmm3, %%xmm6 ;\n"
+ "movdqa %%xmm7, %%xmm3 ;\n"
+ "pandn %%xmm4, %%xmm7 ;\n"
+ "pandn %%xmm5, %%xmm3 ;\n"
+ "por %%xmm6, %%xmm0 ;\n"
+ "por %%xmm7, %%xmm1 ;\n"
+ "por %%xmm3, %%xmm2 ;\n"
+
+ /* store */
+ "movdqa %%xmm0, 0(%%eax) ;\n"
+ "movdqa %%xmm1, 16(%%eax) ;\n"
+ "movdqa %%xmm2, 32(%%eax) ;\n"
+ :
+ : "m"(u), "r"(&table[pos * 8]), "m"(t), "m"(sign) /* %0 = u, %1 = table, %2 = t, %3 = sign */
+ : "%eax", "%ecx", "%edx", "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", "cc", "memory"
+ );
+}
+
+#endif /* defined(ED25519_GCC_32BIT_SSE_CHOOSE) */
+
+
diff --git a/src/ext/ed25519/donna/ed25519-donna-32bit-tables.h b/src/ext/ed25519/donna/ed25519-donna-32bit-tables.h
new file mode 100644
index 0000000000..c977c26ebc
--- /dev/null
+++ b/src/ext/ed25519/donna/ed25519-donna-32bit-tables.h
@@ -0,0 +1,61 @@
+/* Curve constants in the 10-limb 26/25-bit field representation
+ * (16-byte aligned so the SSE2 routines can use movdqa loads). */
+
+/* The Ed25519 base point -- four field elements, presumably the
+ * (x, y, z, t) extended coordinates with z = 1; confirm the field order
+ * against the ge25519 struct declaration. */
+static const ge25519 ALIGN(16) ge25519_basepoint = {
+ {0x0325d51a,0x018b5823,0x00f6592a,0x0104a92d,0x01a4b31d,0x01d6dc5c,0x027118fe,0x007fd814,0x013cd6e5,0x0085a4db},
+ {0x02666658,0x01999999,0x00cccccc,0x01333333,0x01999999,0x00666666,0x03333333,0x00cccccc,0x02666666,0x01999999},
+ {0x00000001,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000},
+ {0x01b7dda3,0x01a2ace9,0x025eadbb,0x0003ba8a,0x0083c27e,0x00abe37d,0x01274732,0x00ccacdd,0x00fd78b7,0x019e1d7c}
+};
+
+/*
+ d
+*/
+
+/* The Ed25519 curve constant d (presumably -121665/121666 mod 2^255-19
+ * per the Ed25519 spec -- TODO confirm). */
+static const bignum25519 ALIGN(16) ge25519_ecd = {
+ 0x035978a3,0x00d37284,0x03156ebd,0x006a0a0e,0x0001c029,0x0179e898,0x03a03cbb,0x01ce7198,0x02e2b6ff,0x01480db3
+};
+
+/* 2*d, used by the niels-point addition formulas. */
+static const bignum25519 ALIGN(16) ge25519_ec2d = {
+ 0x02b2f159,0x01a6e509,0x022add7a,0x00d4141d,0x00038052,0x00f3d130,0x03407977,0x019ce331,0x01c56dff,0x00901b67
+};
+
+/*
+ sqrt(-1)
+*/
+
+static const bignum25519 ALIGN(16) ge25519_sqrtneg1 = {
+ 0x020ea0b0,0x0186c9d2,0x008f189d,0x0035697f,0x00bd0c60,0x01fbd7a7,0x02804c9e,0x01e16569,0x0004fc1d,0x00ae0c92
+};
+
+static const ge25519_niels ALIGN(16) ge25519_niels_sliding_multiples[32] = {
+ {{0x0340913e,0x000e4175,0x03d673a2,0x002e8a05,0x03f4e67c,0x008f8a09,0x00c21a34,0x004cf4b8,0x01298f81,0x0113f4be},{0x018c3b85,0x0124f1bd,0x01c325f7,0x0037dc60,0x033e4cb7,0x003d42c2,0x01a44c32,0x014ca4e1,0x03a33d4b,0x001f3e74},{0x037aaa68,0x00448161,0x0093d579,0x011e6556,0x009b67a0,0x0143598c,0x01bee5ee,0x00b50b43,0x0289f0c6,0x01bc45ed}},
+ {{0x00fcd265,0x0047fa29,0x034faacc,0x01ef2e0d,0x00ef4d4f,0x014bd6bd,0x00f98d10,0x014c5026,0x007555bd,0x00aae456},{0x00ee9730,0x016c2a13,0x017155e4,0x01874432,0x00096a10,0x01016732,0x01a8014f,0x011e9823,0x01b9a80f,0x01e85938},{0x01d0d889,0x01a4cfc3,0x034c4295,0x0110e1ae,0x0162508c,0x00f2db4c,0x0072a2c6,0x0098da2e,0x02f12b9b,0x0168a09a}},
+ {{0x0047d6ba,0x0060b0e9,0x0136eff2,0x008a5939,0x03540053,0x0064a087,0x02788e5c,0x00be7c67,0x033eb1b5,0x005529f9},{0x00a5bb33,0x00af1102,0x01a05442,0x001e3af7,0x02354123,0x00bfec44,0x01f5862d,0x00dd7ba3,0x03146e20,0x00a51733},{0x012a8285,0x00f6fc60,0x023f9797,0x003e85ee,0x009c3820,0x01bda72d,0x01b3858d,0x00d35683,0x0296b3bb,0x010eaaf9}},
+ {{0x023221b1,0x01cb26aa,0x0074f74d,0x0099ddd1,0x01b28085,0x00192c3a,0x013b27c9,0x00fc13bd,0x01d2e531,0x0075bb75},{0x004ea3bf,0x00973425,0x001a4d63,0x01d59cee,0x01d1c0d4,0x00542e49,0x01294114,0x004fce36,0x029283c9,0x01186fa9},{0x01b8b3a2,0x00db7200,0x00935e30,0x003829f5,0x02cc0d7d,0x0077adf3,0x0220dd2c,0x0014ea53,0x01c6a0f9,0x01ea7eec}},
+ {{0x039d8064,0x01885f80,0x00337e6d,0x01b7a902,0x02628206,0x015eb044,0x01e30473,0x0191f2d9,0x011fadc9,0x01270169},{0x02a8632f,0x0199e2a9,0x00d8b365,0x017a8de2,0x02994279,0x0086f5b5,0x0119e4e3,0x01eb39d6,0x0338add7,0x00d2e7b4},{0x0045af1b,0x013a2fe4,0x0245e0d6,0x014538ce,0x038bfe0f,0x01d4cf16,0x037e14c9,0x0160d55e,0x0021b008,0x01cf05c8}},
+ {{0x01864348,0x01d6c092,0x0070262b,0x014bb844,0x00fb5acd,0x008deb95,0x003aaab5,0x00eff474,0x00029d5c,0x0062ad66},{0x02802ade,0x01c02122,0x01c4e5f7,0x00781181,0x039767fb,0x01703406,0x0342388b,0x01f5e227,0x022546d8,0x0109d6ab},{0x016089e9,0x00cb317f,0x00949b05,0x01099417,0x000c7ad2,0x011a8622,0x0088ccda,0x01290886,0x022b53df,0x00f71954}},
+ {{0x027fbf93,0x01c04ecc,0x01ed6a0d,0x004cdbbb,0x02bbf3af,0x00ad5968,0x01591955,0x0094f3a2,0x02d17602,0x00099e20},{0x02007f6d,0x003088a8,0x03db77ee,0x00d5ade6,0x02fe12ce,0x0107ba07,0x0107097d,0x00482a6f,0x02ec346f,0x008d3f5f},{0x032ea378,0x0028465c,0x028e2a6c,0x018efc6e,0x0090df9a,0x01a7e533,0x039bfc48,0x010c745d,0x03daa097,0x0125ee9b}},
+ {{0x028ccf0b,0x00f36191,0x021ac081,0x012154c8,0x034e0a6e,0x01b25192,0x00180403,0x01d7eea1,0x00218d05,0x010ed735},{0x03cfeaa0,0x01b300c4,0x008da499,0x0068c4e1,0x0219230a,0x01f2d4d0,0x02defd60,0x00e565b7,0x017f12de,0x018788a4},{0x03d0b516,0x009d8be6,0x03ddcbb3,0x0071b9fe,0x03ace2bd,0x01d64270,0x032d3ec9,0x01084065,0x0210ae4d,0x01447584}},
+ {{0x0020de87,0x00e19211,0x01b68102,0x00b5ac97,0x022873c0,0x01942d25,0x01271394,0x0102073f,0x02fe2482,0x01c69ff9},{0x010e9d81,0x019dbbe5,0x0089f258,0x006e06b8,0x02951883,0x018f1248,0x019b3237,0x00bc7553,0x024ddb85,0x01b4c964},{0x01c8c854,0x0060ae29,0x01406d8e,0x01cff2f9,0x00cff451,0x01778d0c,0x03ac8c41,0x01552e59,0x036559ee,0x011d1b12}},
+ {{0x00741147,0x0151b219,0x01092690,0x00e877e6,0x01f4d6bb,0x0072a332,0x01cd3b03,0x00dadff2,0x0097db5e,0x0086598d},{0x01c69a2b,0x01decf1b,0x02c2fa6e,0x013b7c4f,0x037beac8,0x013a16b5,0x028e7bda,0x01f6e8ac,0x01e34fe9,0x01726947},{0x01f10e67,0x003c73de,0x022b7ea2,0x010f32c2,0x03ff776a,0x00142277,0x01d38b88,0x00776138,0x03c60822,0x01201140}},
+ {{0x0236d175,0x0008748e,0x03c6476d,0x013f4cdc,0x02eed02a,0x00838a47,0x032e7210,0x018bcbb3,0x00858de4,0x01dc7826},{0x00a37fc7,0x0127b40b,0x01957884,0x011d30ad,0x02816683,0x016e0e23,0x00b76be4,0x012db115,0x02516506,0x0154ce62},{0x00451edf,0x00bd749e,0x03997342,0x01cc2c4c,0x00eb6975,0x01a59508,0x03a516cf,0x00c228ef,0x0168ff5a,0x01697b47}},
+ {{0x00527359,0x01783156,0x03afd75c,0x00ce56dc,0x00e4b970,0x001cabe9,0x029e0f6d,0x0188850c,0x0135fefd,0x00066d80},{0x02150e83,0x01448abf,0x02bb0232,0x012bf259,0x033c8268,0x00711e20,0x03fc148f,0x005e0e70,0x017d8bf9,0x0112b2e2},{0x02134b83,0x001a0517,0x0182c3cc,0x00792182,0x0313d799,0x001a3ed7,0x0344547e,0x01f24a0d,0x03de6ad2,0x00543127}},
+ {{0x00dca868,0x00618f27,0x015a1709,0x00ddc38a,0x0320fd13,0x0036168d,0x0371ab06,0x01783fc7,0x0391e05f,0x01e29b5d},{0x01471138,0x00fca542,0x00ca31cf,0x01ca7bad,0x0175bfbc,0x01a708ad,0x03bce212,0x01244215,0x0075bb99,0x01acad68},{0x03a0b976,0x01dc12d1,0x011aab17,0x00aba0ba,0x029806cd,0x0142f590,0x018fd8ea,0x01a01545,0x03c4ad55,0x01c971ff}},
+ {{0x00d098c0,0x000afdc7,0x006cd230,0x01276af3,0x03f905b2,0x0102994c,0x002eb8a4,0x015cfbeb,0x025f855f,0x01335518},{0x01cf99b2,0x0099c574,0x01a69c88,0x00881510,0x01cd4b54,0x0112109f,0x008abdc5,0x0074647a,0x0277cb1f,0x01e53324},{0x02ac5053,0x01b109b0,0x024b095e,0x016997b3,0x02f26bb6,0x00311021,0x00197885,0x01d0a55a,0x03b6fcc8,0x01c020d5}},
+ {{0x02584a34,0x00e7eee0,0x03257a03,0x011e95a3,0x011ead91,0x00536202,0x00b1ce24,0x008516c6,0x03669d6d,0x004ea4a8},{0x00773f01,0x0019c9ce,0x019f6171,0x01d4afde,0x02e33323,0x01ad29b6,0x02ead1dc,0x01ed51a5,0x01851ad0,0x001bbdfa},{0x00577de5,0x00ddc730,0x038b9952,0x00f281ae,0x01d50390,0x0002e071,0x000780ec,0x010d448d,0x01f8a2af,0x00f0a5b7}},
+ {{0x031f2541,0x00d34bae,0x0323ff9d,0x003a056d,0x02e25443,0x00a1ad05,0x00d1bee8,0x002f7f8e,0x03007477,0x002a24b1},{0x0114a713,0x01457e76,0x032255d5,0x01cc647f,0x02a4bdef,0x0153d730,0x00118bcf,0x00f755ff,0x013490c7,0x01ea674e},{0x02bda3e8,0x00bb490d,0x00f291ea,0x000abf40,0x01dea321,0x002f9ce0,0x00b2b193,0x00fa54b5,0x0128302f,0x00a19d8b}},
+ {{0x022ef5bd,0x01638af3,0x038c6f8a,0x01a33a3d,0x039261b2,0x01bb89b8,0x010bcf9d,0x00cf42a9,0x023d6f17,0x01da1bca},{0x00e35b25,0x000d824f,0x0152e9cf,0x00ed935d,0x020b8460,0x01c7b83f,0x00c969e5,0x01a74198,0x0046a9d9,0x00cbc768},{0x01597c6a,0x0144a99b,0x00a57551,0x0018269c,0x023c464c,0x0009b022,0x00ee39e1,0x0114c7f2,0x038a9ad2,0x01584c17}},
+ {{0x03b0c0d5,0x00b30a39,0x038a6ce4,0x01ded83a,0x01c277a6,0x01010a61,0x0346d3eb,0x018d995e,0x02f2c57c,0x000c286b},{0x0092aed1,0x0125e37b,0x027ca201,0x001a6b6b,0x03290f55,0x0047ba48,0x018d916c,0x01a59062,0x013e35d4,0x0002abb1},{0x003ad2aa,0x007ddcc0,0x00c10f76,0x0001590b,0x002cfca6,0x000ed23e,0x00ee4329,0x00900f04,0x01c24065,0x0082fa70}},
+ {{0x02025e60,0x003912b8,0x0327041c,0x017e5ee5,0x02c0ecec,0x015a0d1c,0x02b1ce7c,0x0062220b,0x0145067e,0x01a5d931},{0x009673a6,0x00e1f609,0x00927c2a,0x016faa37,0x01650ef0,0x016f63b5,0x03cd40e1,0x003bc38f,0x0361f0ac,0x01d42acc},{0x02f81037,0x008ca0e8,0x017e23d1,0x011debfe,0x01bcbb68,0x002e2563,0x03e8add6,0x000816e5,0x03fb7075,0x0153e5ac}},
+ {{0x02b11ecd,0x016bf185,0x008f22ef,0x00e7d2bb,0x0225d92e,0x00ece785,0x00508873,0x017e16f5,0x01fbe85d,0x01e39a0e},{0x01669279,0x017c810a,0x024941f5,0x0023ebeb,0x00eb7688,0x005760f1,0x02ca4146,0x0073cde7,0x0052bb75,0x00f5ffa7},{0x03b8856b,0x00cb7dcd,0x02f14e06,0x001820d0,0x01d74175,0x00e59e22,0x03fba550,0x00484641,0x03350088,0x01c3c9a3}},
+ {{0x00dcf355,0x0104481c,0x0022e464,0x01f73fe7,0x00e03325,0x0152b698,0x02ef769a,0x00973663,0x00039b8c,0x0101395b},{0x01805f47,0x019160ec,0x03832cd0,0x008b06eb,0x03d4d717,0x004cb006,0x03a75b8f,0x013b3d30,0x01cfad88,0x01f034d1},{0x0078338a,0x01c7d2e3,0x02bc2b23,0x018b3f05,0x0280d9aa,0x005f3d44,0x0220a95a,0x00eeeb97,0x0362aaec,0x00835d51}},
+ {{0x01b9f543,0x013fac4d,0x02ad93ae,0x018ef464,0x0212cdf7,0x01138ba9,0x011583ab,0x019c3d26,0x028790b4,0x00e2e2b6},{0x033bb758,0x01f0dbf1,0x03734bd1,0x0129b1e5,0x02b3950e,0x003bc922,0x01a53ec8,0x018c5532,0x006f3cee,0x00ae3c79},{0x0351f95d,0x0012a737,0x03d596b8,0x017658fe,0x00ace54a,0x008b66da,0x0036c599,0x012a63a2,0x032ceba1,0x00126bac}},
+ {{0x03dcfe7e,0x019f4f18,0x01c81aee,0x0044bc2b,0x00827165,0x014f7c13,0x03b430f0,0x00bf96cc,0x020c8d62,0x01471997},{0x01fc7931,0x001f42dd,0x00ba754a,0x005bd339,0x003fbe49,0x016b3930,0x012a159c,0x009f83b0,0x03530f67,0x01e57b85},{0x02ecbd81,0x0096c294,0x01fce4a9,0x017701a5,0x0175047d,0x00ee4a31,0x012686e5,0x008efcd4,0x0349dc54,0x01b3466f}},
+ {{0x02179ca3,0x01d86414,0x03f0afd0,0x00305964,0x015c7428,0x0099711e,0x015d5442,0x00c71014,0x01b40b2e,0x01d483cf},{0x01afc386,0x01984859,0x036203ff,0x0045c6a8,0x0020a8aa,0x00990baa,0x03313f10,0x007ceede,0x027429e4,0x017806ce},{0x039357a1,0x0142f8f4,0x0294a7b6,0x00eaccf4,0x0259edb3,0x01311e6e,0x004d326f,0x0130c346,0x01ccef3c,0x01c424b2}},
+ {{0x0364918c,0x00148fc0,0x01638a7b,0x01a1fd5b,0x028ad013,0x0081e5a4,0x01a54f33,0x0174e101,0x003d0257,0x003a856c},{0x00051dcf,0x00f62b1d,0x0143d0ad,0x0042adbd,0x000fda90,0x01743ceb,0x0173e5e4,0x017bc749,0x03b7137a,0x0105ce96},{0x00f9218a,0x015b8c7c,0x00e102f8,0x0158d7e2,0x0169a5b8,0x00b2f176,0x018b347a,0x014cfef2,0x0214a4e3,0x017f1595}},
+ {{0x006d7ae5,0x0195c371,0x0391e26d,0x0062a7c6,0x003f42ab,0x010dad86,0x024f8198,0x01542b2a,0x0014c454,0x0189c471},{0x0390988e,0x00b8799d,0x02e44912,0x0078e2e6,0x00075654,0x01923eed,0x0040cd72,0x00a37c76,0x0009d466,0x00c8531d},{0x02651770,0x00609d01,0x0286c265,0x0134513c,0x00ee9281,0x005d223c,0x035c760c,0x00679b36,0x0073ecb8,0x016faa50}},
+ {{0x02c89be4,0x016fc244,0x02f38c83,0x018beb72,0x02b3ce2c,0x0097b065,0x034f017b,0x01dd957f,0x00148f61,0x00eab357},{0x0343d2f8,0x003398fc,0x011e368e,0x00782a1f,0x00019eea,0x00117b6f,0x0128d0d1,0x01a5e6bb,0x01944f1b,0x012b41e1},{0x03318301,0x018ecd30,0x0104d0b1,0x0038398b,0x03726701,0x019da88c,0x002d9769,0x00a7a681,0x031d9028,0x00ebfc32}},
+ {{0x0220405e,0x0171face,0x02d930f8,0x017f6d6a,0x023b8c47,0x0129d5f9,0x02972456,0x00a3a524,0x006f4cd2,0x004439fa},{0x00c53505,0x0190c2fd,0x00507244,0x009930f9,0x01a39270,0x01d327c6,0x0399bc47,0x01cfe13d,0x0332bd99,0x00b33e7d},{0x0203f5e4,0x003627b5,0x00018af8,0x01478581,0x004a2218,0x002e3bb7,0x039384d0,0x0146ea62,0x020b9693,0x0017155f}},
+ {{0x03c97e6f,0x00738c47,0x03b5db1f,0x01808fcf,0x01e8fc98,0x01ed25dd,0x01bf5045,0x00eb5c2b,0x0178fe98,0x01b85530},{0x01c20eb0,0x01aeec22,0x030b9eee,0x01b7d07e,0x0187e16f,0x014421fb,0x009fa731,0x0040b6d7,0x00841861,0x00a27fbc},{0x02d69abf,0x0058cdbf,0x0129f9ec,0x013c19ae,0x026c5b93,0x013a7fe7,0x004bb2ba,0x0063226f,0x002a95ca,0x01abefd9}},
+ {{0x02f5d2c1,0x00378318,0x03734fb5,0x01258073,0x0263f0f6,0x01ad70e0,0x01b56d06,0x01188fbd,0x011b9503,0x0036d2e1},{0x0113a8cc,0x01541c3e,0x02ac2bbc,0x01d95867,0x01f47459,0x00ead489,0x00ab5b48,0x01db3b45,0x00edb801,0x004b024f},{0x00b8190f,0x011fe4c2,0x00621f82,0x010508d7,0x001a5a76,0x00c7d7fd,0x03aab96d,0x019cd9dc,0x019c6635,0x00ceaa1e}},
+ {{0x01085cf2,0x01fd47af,0x03e3f5e1,0x004b3e99,0x01e3d46a,0x0060033c,0x015ff0a8,0x0150cdd8,0x029e8e21,0x008cf1bc},{0x00156cb1,0x003d623f,0x01a4f069,0x00d8d053,0x01b68aea,0x01ca5ab6,0x0316ae43,0x0134dc44,0x001c8d58,0x0084b343},{0x0318c781,0x0135441f,0x03a51a5e,0x019293f4,0x0048bb37,0x013d3341,0x0143151e,0x019c74e1,0x00911914,0x0076ddde}},
+ {{0x006bc26f,0x00d48e5f,0x00227bbe,0x00629ea8,0x01ea5f8b,0x0179a330,0x027a1d5f,0x01bf8f8e,0x02d26e2a,0x00c6b65e},{0x01701ab6,0x0051da77,0x01b4b667,0x00a0ce7c,0x038ae37b,0x012ac852,0x03a0b0fe,0x0097c2bb,0x00a017d2,0x01eb8b2a},{0x0120b962,0x0005fb42,0x0353b6fd,0x0061f8ce,0x007a1463,0x01560a64,0x00e0a792,0x01907c92,0x013a6622,0x007b47f1}}
+};
diff --git a/src/ext/ed25519/donna/ed25519-donna-64bit-sse2.h b/src/ext/ed25519/donna/ed25519-donna-64bit-sse2.h
new file mode 100644
index 0000000000..ca08651d67
--- /dev/null
+++ b/src/ext/ed25519/donna/ed25519-donna-64bit-sse2.h
@@ -0,0 +1,436 @@
+#if defined(ED25519_GCC_64BIT_SSE_CHOOSE)
+
+#define HAVE_GE25519_SCALARMULT_BASE_CHOOSE_NIELS
+
+DONNA_NOINLINE static void
+ge25519_scalarmult_base_choose_niels(ge25519_niels *t, const uint8_t table[256][96], uint32_t pos, signed char b) {
+	int64_t breg = (int64_t)b; /* sign-extend the signed digit b */
+	uint64_t sign = (uint64_t)breg >> 63; /* 1 if b < 0, else 0 */
+	uint64_t mask = ~(sign - 1); /* all-ones if b < 0, else 0 */
+	uint64_t u = (breg + mask) ^ mask; /* u = |b|, computed branch-free */
+
+	__asm__ __volatile__ (
+		/* ysubx+xaddy+t2d: constant-time select of entry u (0..8); every entry is read and masked */
+		"movq %0, %%rax ;\n"
+		"movd %%rax, %%xmm14 ;\n"
+		"pshufd $0x00, %%xmm14, %%xmm14 ;\n"
+		"pxor %%xmm0, %%xmm0 ;\n"
+		"pxor %%xmm1, %%xmm1 ;\n"
+		"pxor %%xmm2, %%xmm2 ;\n"
+		"pxor %%xmm3, %%xmm3 ;\n"
+		"pxor %%xmm4, %%xmm4 ;\n"
+		"pxor %%xmm5, %%xmm5 ;\n"
+
+		/* 0: the identity element (ysubx = xaddy = 1, t2d = 0) */
+		"movq $0, %%rax ;\n"
+		"movd %%rax, %%xmm15 ;\n"
+		"pshufd $0x00, %%xmm15, %%xmm15 ;\n"
+		"pcmpeqd %%xmm14, %%xmm15 ;\n"
+		"movq $1, %%rax ;\n"
+		"movd %%rax, %%xmm6 ;\n"
+		"pxor %%xmm7, %%xmm7 ;\n"
+		"pand %%xmm15, %%xmm6 ;\n"
+		"pand %%xmm15, %%xmm7 ;\n"
+		"por %%xmm6, %%xmm0 ;\n"
+		"por %%xmm7, %%xmm1 ;\n"
+		"por %%xmm6, %%xmm2 ;\n"
+		"por %%xmm7, %%xmm3 ;\n"
+
+		/* 1 */
+		"movq $1, %%rax ;\n"
+		"movd %%rax, %%xmm15 ;\n"
+		"pshufd $0x00, %%xmm15, %%xmm15 ;\n"
+		"pcmpeqd %%xmm14, %%xmm15 ;\n"
+		"movdqa 0(%1), %%xmm6 ;\n"
+		"movdqa 16(%1), %%xmm7 ;\n"
+		"movdqa 32(%1), %%xmm8 ;\n"
+		"movdqa 48(%1), %%xmm9 ;\n"
+		"movdqa 64(%1), %%xmm10 ;\n"
+		"movdqa 80(%1), %%xmm11 ;\n"
+		"pand %%xmm15, %%xmm6 ;\n"
+		"pand %%xmm15, %%xmm7 ;\n"
+		"pand %%xmm15, %%xmm8 ;\n"
+		"pand %%xmm15, %%xmm9 ;\n"
+		"pand %%xmm15, %%xmm10 ;\n"
+		"pand %%xmm15, %%xmm11 ;\n"
+		"por %%xmm6, %%xmm0 ;\n"
+		"por %%xmm7, %%xmm1 ;\n"
+		"por %%xmm8, %%xmm2 ;\n"
+		"por %%xmm9, %%xmm3 ;\n"
+		"por %%xmm10, %%xmm4 ;\n"
+		"por %%xmm11, %%xmm5 ;\n"
+
+		/* 2 */
+		"movq $2, %%rax ;\n"
+		"movd %%rax, %%xmm15 ;\n"
+		"pshufd $0x00, %%xmm15, %%xmm15 ;\n"
+		"pcmpeqd %%xmm14, %%xmm15 ;\n"
+		"movdqa 96(%1), %%xmm6 ;\n"
+		"movdqa 112(%1), %%xmm7 ;\n"
+		"movdqa 128(%1), %%xmm8 ;\n"
+		"movdqa 144(%1), %%xmm9 ;\n"
+		"movdqa 160(%1), %%xmm10 ;\n"
+		"movdqa 176(%1), %%xmm11 ;\n"
+		"pand %%xmm15, %%xmm6 ;\n"
+		"pand %%xmm15, %%xmm7 ;\n"
+		"pand %%xmm15, %%xmm8 ;\n"
+		"pand %%xmm15, %%xmm9 ;\n"
+		"pand %%xmm15, %%xmm10 ;\n"
+		"pand %%xmm15, %%xmm11 ;\n"
+		"por %%xmm6, %%xmm0 ;\n"
+		"por %%xmm7, %%xmm1 ;\n"
+		"por %%xmm8, %%xmm2 ;\n"
+		"por %%xmm9, %%xmm3 ;\n"
+		"por %%xmm10, %%xmm4 ;\n"
+		"por %%xmm11, %%xmm5 ;\n"
+
+		/* 3 */
+		"movq $3, %%rax ;\n"
+		"movd %%rax, %%xmm15 ;\n"
+		"pshufd $0x00, %%xmm15, %%xmm15 ;\n"
+		"pcmpeqd %%xmm14, %%xmm15 ;\n"
+		"movdqa 192(%1), %%xmm6 ;\n"
+		"movdqa 208(%1), %%xmm7 ;\n"
+		"movdqa 224(%1), %%xmm8 ;\n"
+		"movdqa 240(%1), %%xmm9 ;\n"
+		"movdqa 256(%1), %%xmm10 ;\n"
+		"movdqa 272(%1), %%xmm11 ;\n"
+		"pand %%xmm15, %%xmm6 ;\n"
+		"pand %%xmm15, %%xmm7 ;\n"
+		"pand %%xmm15, %%xmm8 ;\n"
+		"pand %%xmm15, %%xmm9 ;\n"
+		"pand %%xmm15, %%xmm10 ;\n"
+		"pand %%xmm15, %%xmm11 ;\n"
+		"por %%xmm6, %%xmm0 ;\n"
+		"por %%xmm7, %%xmm1 ;\n"
+		"por %%xmm8, %%xmm2 ;\n"
+		"por %%xmm9, %%xmm3 ;\n"
+		"por %%xmm10, %%xmm4 ;\n"
+		"por %%xmm11, %%xmm5 ;\n"
+
+		/* 4 */
+		"movq $4, %%rax ;\n"
+		"movd %%rax, %%xmm15 ;\n"
+		"pshufd $0x00, %%xmm15, %%xmm15 ;\n"
+		"pcmpeqd %%xmm14, %%xmm15 ;\n"
+		"movdqa 288(%1), %%xmm6 ;\n"
+		"movdqa 304(%1), %%xmm7 ;\n"
+		"movdqa 320(%1), %%xmm8 ;\n"
+		"movdqa 336(%1), %%xmm9 ;\n"
+		"movdqa 352(%1), %%xmm10 ;\n"
+		"movdqa 368(%1), %%xmm11 ;\n"
+		"pand %%xmm15, %%xmm6 ;\n"
+		"pand %%xmm15, %%xmm7 ;\n"
+		"pand %%xmm15, %%xmm8 ;\n"
+		"pand %%xmm15, %%xmm9 ;\n"
+		"pand %%xmm15, %%xmm10 ;\n"
+		"pand %%xmm15, %%xmm11 ;\n"
+		"por %%xmm6, %%xmm0 ;\n"
+		"por %%xmm7, %%xmm1 ;\n"
+		"por %%xmm8, %%xmm2 ;\n"
+		"por %%xmm9, %%xmm3 ;\n"
+		"por %%xmm10, %%xmm4 ;\n"
+		"por %%xmm11, %%xmm5 ;\n"
+
+		/* 5 */
+		"movq $5, %%rax ;\n"
+		"movd %%rax, %%xmm15 ;\n"
+		"pshufd $0x00, %%xmm15, %%xmm15 ;\n"
+		"pcmpeqd %%xmm14, %%xmm15 ;\n"
+		"movdqa 384(%1), %%xmm6 ;\n"
+		"movdqa 400(%1), %%xmm7 ;\n"
+		"movdqa 416(%1), %%xmm8 ;\n"
+		"movdqa 432(%1), %%xmm9 ;\n"
+		"movdqa 448(%1), %%xmm10 ;\n"
+		"movdqa 464(%1), %%xmm11 ;\n"
+		"pand %%xmm15, %%xmm6 ;\n"
+		"pand %%xmm15, %%xmm7 ;\n"
+		"pand %%xmm15, %%xmm8 ;\n"
+		"pand %%xmm15, %%xmm9 ;\n"
+		"pand %%xmm15, %%xmm10 ;\n"
+		"pand %%xmm15, %%xmm11 ;\n"
+		"por %%xmm6, %%xmm0 ;\n"
+		"por %%xmm7, %%xmm1 ;\n"
+		"por %%xmm8, %%xmm2 ;\n"
+		"por %%xmm9, %%xmm3 ;\n"
+		"por %%xmm10, %%xmm4 ;\n"
+		"por %%xmm11, %%xmm5 ;\n"
+
+		/* 6 */
+		"movq $6, %%rax ;\n"
+		"movd %%rax, %%xmm15 ;\n"
+		"pshufd $0x00, %%xmm15, %%xmm15 ;\n"
+		"pcmpeqd %%xmm14, %%xmm15 ;\n"
+		"movdqa 480(%1), %%xmm6 ;\n"
+		"movdqa 496(%1), %%xmm7 ;\n"
+		"movdqa 512(%1), %%xmm8 ;\n"
+		"movdqa 528(%1), %%xmm9 ;\n"
+		"movdqa 544(%1), %%xmm10 ;\n"
+		"movdqa 560(%1), %%xmm11 ;\n"
+		"pand %%xmm15, %%xmm6 ;\n"
+		"pand %%xmm15, %%xmm7 ;\n"
+		"pand %%xmm15, %%xmm8 ;\n"
+		"pand %%xmm15, %%xmm9 ;\n"
+		"pand %%xmm15, %%xmm10 ;\n"
+		"pand %%xmm15, %%xmm11 ;\n"
+		"por %%xmm6, %%xmm0 ;\n"
+		"por %%xmm7, %%xmm1 ;\n"
+		"por %%xmm8, %%xmm2 ;\n"
+		"por %%xmm9, %%xmm3 ;\n"
+		"por %%xmm10, %%xmm4 ;\n"
+		"por %%xmm11, %%xmm5 ;\n"
+
+		/* 7 */
+		"movq $7, %%rax ;\n"
+		"movd %%rax, %%xmm15 ;\n"
+		"pshufd $0x00, %%xmm15, %%xmm15 ;\n"
+		"pcmpeqd %%xmm14, %%xmm15 ;\n"
+		"movdqa 576(%1), %%xmm6 ;\n"
+		"movdqa 592(%1), %%xmm7 ;\n"
+		"movdqa 608(%1), %%xmm8 ;\n"
+		"movdqa 624(%1), %%xmm9 ;\n"
+		"movdqa 640(%1), %%xmm10 ;\n"
+		"movdqa 656(%1), %%xmm11 ;\n"
+		"pand %%xmm15, %%xmm6 ;\n"
+		"pand %%xmm15, %%xmm7 ;\n"
+		"pand %%xmm15, %%xmm8 ;\n"
+		"pand %%xmm15, %%xmm9 ;\n"
+		"pand %%xmm15, %%xmm10 ;\n"
+		"pand %%xmm15, %%xmm11 ;\n"
+		"por %%xmm6, %%xmm0 ;\n"
+		"por %%xmm7, %%xmm1 ;\n"
+		"por %%xmm8, %%xmm2 ;\n"
+		"por %%xmm9, %%xmm3 ;\n"
+		"por %%xmm10, %%xmm4 ;\n"
+		"por %%xmm11, %%xmm5 ;\n"
+
+		/* 8 */
+		"movq $8, %%rax ;\n"
+		"movd %%rax, %%xmm15 ;\n"
+		"pshufd $0x00, %%xmm15, %%xmm15 ;\n"
+		"pcmpeqd %%xmm14, %%xmm15 ;\n"
+		"movdqa 672(%1), %%xmm6 ;\n"
+		"movdqa 688(%1), %%xmm7 ;\n"
+		"movdqa 704(%1), %%xmm8 ;\n"
+		"movdqa 720(%1), %%xmm9 ;\n"
+		"movdqa 736(%1), %%xmm10 ;\n"
+		"movdqa 752(%1), %%xmm11 ;\n"
+		"pand %%xmm15, %%xmm6 ;\n"
+		"pand %%xmm15, %%xmm7 ;\n"
+		"pand %%xmm15, %%xmm8 ;\n"
+		"pand %%xmm15, %%xmm9 ;\n"
+		"pand %%xmm15, %%xmm10 ;\n"
+		"pand %%xmm15, %%xmm11 ;\n"
+		"por %%xmm6, %%xmm0 ;\n"
+		"por %%xmm7, %%xmm1 ;\n"
+		"por %%xmm8, %%xmm2 ;\n"
+		"por %%xmm9, %%xmm3 ;\n"
+		"por %%xmm10, %%xmm4 ;\n"
+		"por %%xmm11, %%xmm5 ;\n"
+
+		/* conditionally swap ysubx and xaddy (when the original digit was negative) */
+		"movq %3, %%rax ;\n"
+		"xorq $1, %%rax ;\n"
+		"movd %%rax, %%xmm14 ;\n"
+		"pxor %%xmm15, %%xmm15 ;\n"
+		"pshufd $0x00, %%xmm14, %%xmm14 ;\n"
+		"pxor %%xmm0, %%xmm2 ;\n"
+		"pxor %%xmm1, %%xmm3 ;\n"
+		"pcmpeqd %%xmm14, %%xmm15 ;\n"
+		"movdqa %%xmm2, %%xmm6 ;\n"
+		"movdqa %%xmm3, %%xmm7 ;\n"
+		"pand %%xmm15, %%xmm6 ;\n"
+		"pand %%xmm15, %%xmm7 ;\n"
+		"pxor %%xmm6, %%xmm0 ;\n"
+		"pxor %%xmm7, %%xmm1 ;\n"
+		"pxor %%xmm0, %%xmm2 ;\n"
+		"pxor %%xmm1, %%xmm3 ;\n"
+
+		/* store ysubx: split 4x64-bit packed value into 10 26/25-bit limbs */
+		"xorq %%rax, %%rax ;\n"
+		"movd %%xmm0, %%rcx ;\n"
+		"movd %%xmm0, %%r8 ;\n"
+		"movd %%xmm1, %%rsi ;\n"
+		"pshufd $0xee, %%xmm0, %%xmm0 ;\n"
+		"pshufd $0xee, %%xmm1, %%xmm1 ;\n"
+		"movd %%xmm0, %%rdx ;\n"
+		"movd %%xmm1, %%rdi ;\n"
+		"shrdq $51, %%rdx, %%r8 ;\n"
+		"shrdq $38, %%rsi, %%rdx ;\n"
+		"shrdq $25, %%rdi, %%rsi ;\n"
+		"shrq $12, %%rdi ;\n"
+		"movq %%rcx, %%r9 ;\n"
+		"movq %%r8, %%r10 ;\n"
+		"movq %%rdx, %%r11 ;\n"
+		"movq %%rsi, %%r12 ;\n"
+		"movq %%rdi, %%r13 ;\n"
+		"shrq $26, %%r9 ;\n"
+		"shrq $26, %%r10 ;\n"
+		"shrq $26, %%r11 ;\n"
+		"shrq $26, %%r12 ;\n"
+		"shrq $26, %%r13 ;\n"
+		"andl $0x3ffffff, %%ecx ;\n"
+		"andl $0x1ffffff, %%r9d ;\n"
+		"andl $0x3ffffff, %%r8d ;\n"
+		"andl $0x1ffffff, %%r10d ;\n"
+		"andl $0x3ffffff, %%edx ;\n"
+		"andl $0x1ffffff, %%r11d ;\n"
+		"andl $0x3ffffff, %%esi ;\n"
+		"andl $0x1ffffff, %%r12d ;\n"
+		"andl $0x3ffffff, %%edi ;\n"
+		"andl $0x1ffffff, %%r13d ;\n"
+		"movl %%ecx, 0(%2) ;\n"
+		"movl %%r9d, 4(%2) ;\n"
+		"movl %%r8d, 8(%2) ;\n"
+		"movl %%r10d, 12(%2) ;\n"
+		"movl %%edx, 16(%2) ;\n"
+		"movl %%r11d, 20(%2) ;\n"
+		"movl %%esi, 24(%2) ;\n"
+		"movl %%r12d, 28(%2) ;\n"
+		"movl %%edi, 32(%2) ;\n"
+		"movl %%r13d, 36(%2) ;\n"
+		"movq %%rax, 40(%2) ;\n"
+
+		/* store xaddy (rax is still zero for the 8-byte padding at 88) */
+		"movd %%xmm2, %%rcx ;\n"
+		"movd %%xmm2, %%r8 ;\n"
+		"movd %%xmm3, %%rsi ;\n"
+		"pshufd $0xee, %%xmm2, %%xmm2 ;\n"
+		"pshufd $0xee, %%xmm3, %%xmm3 ;\n"
+		"movd %%xmm2, %%rdx ;\n"
+		"movd %%xmm3, %%rdi ;\n"
+		"shrdq $51, %%rdx, %%r8 ;\n"
+		"shrdq $38, %%rsi, %%rdx ;\n"
+		"shrdq $25, %%rdi, %%rsi ;\n"
+		"shrq $12, %%rdi ;\n"
+		"movq %%rcx, %%r9 ;\n"
+		"movq %%r8, %%r10 ;\n"
+		"movq %%rdx, %%r11 ;\n"
+		"movq %%rsi, %%r12 ;\n"
+		"movq %%rdi, %%r13 ;\n"
+		"shrq $26, %%r9 ;\n"
+		"shrq $26, %%r10 ;\n"
+		"shrq $26, %%r11 ;\n"
+		"shrq $26, %%r12 ;\n"
+		"shrq $26, %%r13 ;\n"
+		"andl $0x3ffffff, %%ecx ;\n"
+		"andl $0x1ffffff, %%r9d ;\n"
+		"andl $0x3ffffff, %%r8d ;\n"
+		"andl $0x1ffffff, %%r10d ;\n"
+		"andl $0x3ffffff, %%edx ;\n"
+		"andl $0x1ffffff, %%r11d ;\n"
+		"andl $0x3ffffff, %%esi ;\n"
+		"andl $0x1ffffff, %%r12d ;\n"
+		"andl $0x3ffffff, %%edi ;\n"
+		"andl $0x1ffffff, %%r13d ;\n"
+		"movl %%ecx, 48(%2) ;\n"
+		"movl %%r9d, 52(%2) ;\n"
+		"movl %%r8d, 56(%2) ;\n"
+		"movl %%r10d, 60(%2) ;\n"
+		"movl %%edx, 64(%2) ;\n"
+		"movl %%r11d, 68(%2) ;\n"
+		"movl %%esi, 72(%2) ;\n"
+		"movl %%r12d, 76(%2) ;\n"
+		"movl %%edi, 80(%2) ;\n"
+		"movl %%r13d, 84(%2) ;\n"
+		"movq %%rax, 88(%2) ;\n"
+
+		/* extract t2d into 10 limbs, repacked into xmm0..xmm2 */
+		"xorq %%rax, %%rax ;\n"
+		"movd %%xmm4, %%rcx ;\n"
+		"movd %%xmm4, %%r8 ;\n"
+		"movd %%xmm5, %%rsi ;\n"
+		"pshufd $0xee, %%xmm4, %%xmm4 ;\n"
+		"pshufd $0xee, %%xmm5, %%xmm5 ;\n"
+		"movd %%xmm4, %%rdx ;\n"
+		"movd %%xmm5, %%rdi ;\n"
+		"shrdq $51, %%rdx, %%r8 ;\n"
+		"shrdq $38, %%rsi, %%rdx ;\n"
+		"shrdq $25, %%rdi, %%rsi ;\n"
+		"shrq $12, %%rdi ;\n"
+		"movq %%rcx, %%r9 ;\n"
+		"movq %%r8, %%r10 ;\n"
+		"movq %%rdx, %%r11 ;\n"
+		"movq %%rsi, %%r12 ;\n"
+		"movq %%rdi, %%r13 ;\n"
+		"shrq $26, %%r9 ;\n"
+		"shrq $26, %%r10 ;\n"
+		"shrq $26, %%r11 ;\n"
+		"shrq $26, %%r12 ;\n"
+		"shrq $26, %%r13 ;\n"
+		"andl $0x3ffffff, %%ecx ;\n"
+		"andl $0x1ffffff, %%r9d ;\n"
+		"andl $0x3ffffff, %%r8d ;\n"
+		"andl $0x1ffffff, %%r10d ;\n"
+		"andl $0x3ffffff, %%edx ;\n"
+		"andl $0x1ffffff, %%r11d ;\n"
+		"andl $0x3ffffff, %%esi ;\n"
+		"andl $0x1ffffff, %%r12d ;\n"
+		"andl $0x3ffffff, %%edi ;\n"
+		"andl $0x1ffffff, %%r13d ;\n"
+		"movd %%ecx, %%xmm0 ;\n"
+		"movd %%r9d, %%xmm4 ;\n"
+		"movd %%r8d, %%xmm8 ;\n"
+		"movd %%r10d, %%xmm3 ;\n"
+		"movd %%edx, %%xmm1 ;\n"
+		"movd %%r11d, %%xmm5 ;\n"
+		"movd %%esi, %%xmm6 ;\n"
+		"movd %%r12d, %%xmm7 ;\n"
+		"movd %%edi, %%xmm2 ;\n"
+		"movd %%r13d, %%xmm9 ;\n"
+		"punpckldq %%xmm4, %%xmm0 ;\n"
+		"punpckldq %%xmm3, %%xmm8 ;\n"
+		"punpckldq %%xmm5, %%xmm1 ;\n"
+		"punpckldq %%xmm7, %%xmm6 ;\n"
+		"punpckldq %%xmm9, %%xmm2 ;\n"
+		"punpcklqdq %%xmm8, %%xmm0 ;\n"
+		"punpcklqdq %%xmm6, %%xmm1 ;\n"
+
+		/* set up 2p in to 3/4 */
+		"movl $0x7ffffda, %%ecx ;\n"
+		"movl $0x3fffffe, %%edx ;\n"
+		"movl $0x7fffffe, %%eax ;\n"
+		"movd %%ecx, %%xmm3 ;\n"
+		"movd %%edx, %%xmm5 ;\n"
+		"movd %%eax, %%xmm4 ;\n"
+		"punpckldq %%xmm5, %%xmm3 ;\n"
+		"punpckldq %%xmm5, %%xmm4 ;\n"
+		"punpcklqdq %%xmm4, %%xmm3 ;\n"
+		"movdqa %%xmm4, %%xmm5 ;\n"
+		"punpcklqdq %%xmm4, %%xmm4 ;\n"
+
+		/* subtract and conditionally move: t2d = sign ? 2p - t2d : t2d */
+		"movl %3, %%ecx ;\n"
+		"sub $1, %%ecx ;\n"
+		"movd %%ecx, %%xmm6 ;\n"
+		"pshufd $0x00, %%xmm6, %%xmm6 ;\n"
+		"movdqa %%xmm6, %%xmm7 ;\n"
+		"psubd %%xmm0, %%xmm3 ;\n"
+		"psubd %%xmm1, %%xmm4 ;\n"
+		"psubd %%xmm2, %%xmm5 ;\n"
+		"pand %%xmm6, %%xmm0 ;\n"
+		"pand %%xmm6, %%xmm1 ;\n"
+		"pand %%xmm6, %%xmm2 ;\n"
+		"pandn %%xmm3, %%xmm6 ;\n"
+		"movdqa %%xmm7, %%xmm3 ;\n"
+		"pandn %%xmm4, %%xmm7 ;\n"
+		"pandn %%xmm5, %%xmm3 ;\n"
+		"por %%xmm6, %%xmm0 ;\n"
+		"por %%xmm7, %%xmm1 ;\n"
+		"por %%xmm3, %%xmm2 ;\n"
+
+		/* store t2d */
+		"movdqa %%xmm0, 96(%2) ;\n"
+		"movdqa %%xmm1, 112(%2) ;\n"
+		"movdqa %%xmm2, 128(%2) ;\n"
+		:
+		: "m"(u), "r"(&table[pos * 8]), "r"(t), "m"(sign) /* %0 = u, %1 = table, %2 = t, %3 = sign */
+		:
+			"%rax", "%rcx", "%rdx", "%rdi", "%rsi", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13",
+			"%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm14", "%xmm15",
+			"cc", "memory"
+	);
+}
+
+#endif /* defined(ED25519_GCC_64BIT_SSE_CHOOSE) */
+
diff --git a/src/ext/ed25519/donna/ed25519-donna-64bit-tables.h b/src/ext/ed25519/donna/ed25519-donna-64bit-tables.h
new file mode 100644
index 0000000000..4a6ff9edae
--- /dev/null
+++ b/src/ext/ed25519/donna/ed25519-donna-64bit-tables.h
@@ -0,0 +1,53 @@
+static const ge25519 ge25519_basepoint = { /* ed25519 base point; four 5x51-bit limb fields (presumably x, y, z, t extended coords -- verify against ge25519) */
+	{0x00062d608f25d51a,0x000412a4b4f6592a,0x00075b7171a4b31d,0x0001ff60527118fe,0x000216936d3cd6e5},
+	{0x0006666666666658,0x0004cccccccccccc,0x0001999999999999,0x0003333333333333,0x0006666666666666},
+	{0x0000000000000001,0x0000000000000000,0x0000000000000000,0x0000000000000000,0x0000000000000000},
+	{0x00068ab3a5b7dda3,0x00000eea2a5eadbb,0x0002af8df483c27e,0x000332b375274732,0x00067875f0fd78b7}
+};
+
+static const bignum25519 ge25519_ecd = { /* curve constant d (per name), 5x51-bit limbs */
+	0x00034dca135978a3,0x0001a8283b156ebd,0x0005e7a26001c029,0x000739c663a03cbb,0x00052036cee2b6ff
+};
+
+static const bignum25519 ge25519_ec2d = { /* 2*d (per name), 5x51-bit limbs */
+	0x00069b9426b2f159,0x00035050762add7a,0x0003cf44c0038052,0x0006738cc7407977,0x0002406d9dc56dff
+};
+
+static const bignum25519 ge25519_sqrtneg1 = { /* sqrt(-1) constant (per name), 5x51-bit limbs */
+	0x00061b274a0ea0b0,0x0000d5a5fc8f189d,0x0007ef5e9cbd0c60,0x00078595a6804c9e,0x0002b8324804fc1d
+};
+
+static const ge25519_niels ge25519_niels_sliding_multiples[32] = { /* 32 precomputed points in niels form (three bignum25519 fields each); presumably odd multiples of the base point for sliding-window scalarmult -- verify against callers */
+	{{0x00003905d740913e,0x0000ba2817d673a2,0x00023e2827f4e67c,0x000133d2e0c21a34,0x00044fd2f9298f81},{0x000493c6f58c3b85,0x0000df7181c325f7,0x0000f50b0b3e4cb7,0x0005329385a44c32,0x00007cf9d3a33d4b},{0x00011205877aaa68,0x000479955893d579,0x00050d66309b67a0,0x0002d42d0dbee5ee,0x0006f117b689f0c6}},
+	{{0x00011fe8a4fcd265,0x0007bcb8374faacc,0x00052f5af4ef4d4f,0x0005314098f98d10,0x0002ab91587555bd},{0x0005b0a84cee9730,0x00061d10c97155e4,0x0004059cc8096a10,0x00047a608da8014f,0x0007a164e1b9a80f},{0x0006933f0dd0d889,0x00044386bb4c4295,0x0003cb6d3162508c,0x00026368b872a2c6,0x0005a2826af12b9b}},
+	{{0x000182c3a447d6ba,0x00022964e536eff2,0x000192821f540053,0x0002f9f19e788e5c,0x000154a7e73eb1b5},{0x0002bc4408a5bb33,0x000078ebdda05442,0x0002ffb112354123,0x000375ee8df5862d,0x0002945ccf146e20},{0x0003dbf1812a8285,0x0000fa17ba3f9797,0x0006f69cb49c3820,0x00034d5a0db3858d,0x00043aabe696b3bb}},
+	{{0x00072c9aaa3221b1,0x000267774474f74d,0x000064b0e9b28085,0x0003f04ef53b27c9,0x0001d6edd5d2e531},{0x00025cd0944ea3bf,0x00075673b81a4d63,0x000150b925d1c0d4,0x00013f38d9294114,0x000461bea69283c9},{0x00036dc801b8b3a2,0x0000e0a7d4935e30,0x0001deb7cecc0d7d,0x000053a94e20dd2c,0x0007a9fbb1c6a0f9}},
+	{{0x0006217e039d8064,0x0006dea408337e6d,0x00057ac112628206,0x000647cb65e30473,0x00049c05a51fadc9},{0x0006678aa6a8632f,0x0005ea3788d8b365,0x00021bd6d6994279,0x0007ace75919e4e3,0x00034b9ed338add7},{0x0004e8bf9045af1b,0x000514e33a45e0d6,0x0007533c5b8bfe0f,0x000583557b7e14c9,0x00073c172021b008}},
+	{{0x00075b0249864348,0x00052ee11070262b,0x000237ae54fb5acd,0x0003bfd1d03aaab5,0x00018ab598029d5c},{0x000700848a802ade,0x0001e04605c4e5f7,0x0005c0d01b9767fb,0x0007d7889f42388b,0x0004275aae2546d8},{0x00032cc5fd6089e9,0x000426505c949b05,0x00046a18880c7ad2,0x0004a4221888ccda,0x0003dc65522b53df}},
+	{{0x0007013b327fbf93,0x0001336eeded6a0d,0x0002b565a2bbf3af,0x000253ce89591955,0x0000267882d17602},{0x0000c222a2007f6d,0x000356b79bdb77ee,0x00041ee81efe12ce,0x000120a9bd07097d,0x000234fd7eec346f},{0x0000a119732ea378,0x00063bf1ba8e2a6c,0x00069f94cc90df9a,0x000431d1779bfc48,0x000497ba6fdaa097}},
+	{{0x0003cd86468ccf0b,0x00048553221ac081,0x0006c9464b4e0a6e,0x00075fba84180403,0x00043b5cd4218d05},{0x0006cc0313cfeaa0,0x0001a313848da499,0x0007cb534219230a,0x00039596dedefd60,0x00061e22917f12de},{0x0002762f9bd0b516,0x0001c6e7fbddcbb3,0x00075909c3ace2bd,0x00042101972d3ec9,0x000511d61210ae4d}},
+	{{0x000386484420de87,0x0002d6b25db68102,0x000650b4962873c0,0x0004081cfd271394,0x00071a7fe6fe2482},{0x000676ef950e9d81,0x0001b81ae089f258,0x00063c4922951883,0x0002f1d54d9b3237,0x0006d325924ddb85},{0x000182b8a5c8c854,0x00073fcbe5406d8e,0x0005de3430cff451,0x000554b967ac8c41,0x0004746c4b6559ee}},
+	{{0x000546c864741147,0x0003a1df99092690,0x0001ca8cc9f4d6bb,0x00036b7fc9cd3b03,0x000219663497db5e},{0x00077b3c6dc69a2b,0x0004edf13ec2fa6e,0x0004e85ad77beac8,0x0007dba2b28e7bda,0x0005c9a51de34fe9},{0x0000f1cf79f10e67,0x00043ccb0a2b7ea2,0x00005089dfff776a,0x0001dd84e1d38b88,0x0004804503c60822}},
+	{{0x000021d23a36d175,0x0004fd3373c6476d,0x00020e291eeed02a,0x00062f2ecf2e7210,0x000771e098858de4},{0x00049ed02ca37fc7,0x000474c2b5957884,0x0005b8388e816683,0x0004b6c454b76be4,0x000553398a516506},{0x0002f5d278451edf,0x000730b133997342,0x0006965420eb6975,0x000308a3bfa516cf,0x0005a5ed1d68ff5a}},
+	{{0x0005e0c558527359,0x0003395b73afd75c,0x000072afa4e4b970,0x00062214329e0f6d,0x000019b60135fefd},{0x0005122afe150e83,0x0004afc966bb0232,0x0001c478833c8268,0x00017839c3fc148f,0x00044acb897d8bf9},{0x000068145e134b83,0x0001e4860982c3cc,0x000068fb5f13d799,0x0007c9283744547e,0x000150c49fde6ad2}},
+	{{0x0001863c9cdca868,0x0003770e295a1709,0x0000d85a3720fd13,0x0005e0ff1f71ab06,0x00078a6d7791e05f},{0x0003f29509471138,0x000729eeb4ca31cf,0x00069c22b575bfbc,0x0004910857bce212,0x0006b2b5a075bb99},{0x0007704b47a0b976,0x0002ae82e91aab17,0x00050bd6429806cd,0x00068055158fd8ea,0x000725c7ffc4ad55}},
+	{{0x00002bf71cd098c0,0x00049dabcc6cd230,0x00040a6533f905b2,0x000573efac2eb8a4,0x0004cd54625f855f},{0x00026715d1cf99b2,0x0002205441a69c88,0x000448427dcd4b54,0x0001d191e88abdc5,0x000794cc9277cb1f},{0x0006c426c2ac5053,0x0005a65ece4b095e,0x0000c44086f26bb6,0x0007429568197885,0x0007008357b6fcc8}},
+	{{0x00039fbb82584a34,0x00047a568f257a03,0x00014d88091ead91,0x0002145b18b1ce24,0x00013a92a3669d6d},{0x0000672738773f01,0x000752bf799f6171,0x0006b4a6dae33323,0x0007b54696ead1dc,0x00006ef7e9851ad0},{0x0003771cc0577de5,0x0003ca06bb8b9952,0x00000b81c5d50390,0x00043512340780ec,0x0003c296ddf8a2af}},
+	{{0x00034d2ebb1f2541,0x0000e815b723ff9d,0x000286b416e25443,0x0000bdfe38d1bee8,0x0000a892c7007477},{0x000515f9d914a713,0x00073191ff2255d5,0x00054f5cc2a4bdef,0x0003dd57fc118bcf,0x0007a99d393490c7},{0x0002ed2436bda3e8,0x00002afd00f291ea,0x0000be7381dea321,0x0003e952d4b2b193,0x000286762d28302f}},
+	{{0x00058e2bce2ef5bd,0x00068ce8f78c6f8a,0x0006ee26e39261b2,0x00033d0aa50bcf9d,0x0007686f2a3d6f17},{0x000036093ce35b25,0x0003b64d7552e9cf,0x00071ee0fe0b8460,0x00069d0660c969e5,0x00032f1da046a9d9},{0x000512a66d597c6a,0x0000609a70a57551,0x000026c08a3c464c,0x0004531fc8ee39e1,0x000561305f8a9ad2}},
+	{{0x0002cc28e7b0c0d5,0x00077b60eb8a6ce4,0x0004042985c277a6,0x000636657b46d3eb,0x000030a1aef2c57c},{0x0004978dec92aed1,0x000069adae7ca201,0x00011ee923290f55,0x00069641898d916c,0x00000aaec53e35d4},{0x0001f773003ad2aa,0x000005642cc10f76,0x00003b48f82cfca6,0x0002403c10ee4329,0x00020be9c1c24065}},
+	{{0x0000e44ae2025e60,0x0005f97b9727041c,0x0005683472c0ecec,0x000188882eb1ce7c,0x00069764c545067e},{0x000387d8249673a6,0x0005bea8dc927c2a,0x0005bd8ed5650ef0,0x0000ef0e3fcd40e1,0x000750ab3361f0ac},{0x00023283a2f81037,0x000477aff97e23d1,0x0000b8958dbcbb68,0x0000205b97e8add6,0x00054f96b3fb7075}},
+	{{0x0005afc616b11ecd,0x00039f4aec8f22ef,0x0003b39e1625d92e,0x0005f85bd4508873,0x00078e6839fbe85d},{0x0005f20429669279,0x00008fafae4941f5,0x00015d83c4eb7688,0x0001cf379eca4146,0x0003d7fe9c52bb75},{0x00032df737b8856b,0x0000608342f14e06,0x0003967889d74175,0x0001211907fba550,0x00070f268f350088}},
+	{{0x0004112070dcf355,0x0007dcff9c22e464,0x00054ada60e03325,0x00025cd98eef769a,0x000404e56c039b8c},{0x00064583b1805f47,0x00022c1baf832cd0,0x000132c01bd4d717,0x0004ecf4c3a75b8f,0x0007c0d345cfad88},{0x00071f4b8c78338a,0x00062cfc16bc2b23,0x00017cf51280d9aa,0x0003bbae5e20a95a,0x00020d754762aaec}},
+	{{0x0004feb135b9f543,0x00063bd192ad93ae,0x00044e2ea612cdf7,0x000670f4991583ab,0x00038b8ada8790b4},{0x0007c36fc73bb758,0x0004a6c797734bd1,0x0000ef248ab3950e,0x00063154c9a53ec8,0x0002b8f1e46f3cee},{0x00004a9cdf51f95d,0x0005d963fbd596b8,0x00022d9b68ace54a,0x0004a98e8836c599,0x000049aeb32ceba1}},
+	{{0x00067d3c63dcfe7e,0x000112f0adc81aee,0x00053df04c827165,0x0002fe5b33b430f0,0x00051c665e0c8d62},{0x00007d0b75fc7931,0x00016f4ce4ba754a,0x0005ace4c03fbe49,0x00027e0ec12a159c,0x000795ee17530f67},{0x00025b0a52ecbd81,0x0005dc0695fce4a9,0x0003b928c575047d,0x00023bf3512686e5,0x0006cd19bf49dc54}},
+	{{0x0007619052179ca3,0x0000c16593f0afd0,0x000265c4795c7428,0x00031c40515d5442,0x0007520f3db40b2e},{0x0006612165afc386,0x0001171aa36203ff,0x0002642ea820a8aa,0x0001f3bb7b313f10,0x0005e01b3a7429e4},{0x00050be3d39357a1,0x0003ab33d294a7b6,0x0004c479ba59edb3,0x0004c30d184d326f,0x00071092c9ccef3c}},
+	{{0x0000523f0364918c,0x000687f56d638a7b,0x00020796928ad013,0x0005d38405a54f33,0x0000ea15b03d0257},{0x0003d8ac74051dcf,0x00010ab6f543d0ad,0x0005d0f3ac0fda90,0x0005ef1d2573e5e4,0x0004173a5bb7137a},{0x00056e31f0f9218a,0x0005635f88e102f8,0x0002cbc5d969a5b8,0x000533fbc98b347a,0x0005fc565614a4e3}},
+	{{0x0006570dc46d7ae5,0x00018a9f1b91e26d,0x000436b6183f42ab,0x000550acaa4f8198,0x00062711c414c454},{0x0002e1e67790988e,0x0001e38b9ae44912,0x000648fbb4075654,0x00028df1d840cd72,0x0003214c7409d466},{0x0001827406651770,0x0004d144f286c265,0x00017488f0ee9281,0x00019e6cdb5c760c,0x0005bea94073ecb8}},
+	{{0x0005bf0912c89be4,0x00062fadcaf38c83,0x00025ec196b3ce2c,0x00077655ff4f017b,0x0003aacd5c148f61},{0x0000ce63f343d2f8,0x0001e0a87d1e368e,0x000045edbc019eea,0x0006979aed28d0d1,0x0004ad0785944f1b},{0x00063b34c3318301,0x0000e0e62d04d0b1,0x000676a233726701,0x00029e9a042d9769,0x0003aff0cb1d9028}},
+	{{0x0005c7eb3a20405e,0x0005fdb5aad930f8,0x0004a757e63b8c47,0x00028e9492972456,0x000110e7e86f4cd2},{0x0006430bf4c53505,0x000264c3e4507244,0x00074c9f19a39270,0x00073f84f799bc47,0x0002ccf9f732bd99},{0x0000d89ed603f5e4,0x00051e1604018af8,0x0000b8eedc4a2218,0x00051ba98b9384d0,0x00005c557e0b9693}},
+	{{0x0001ce311fc97e6f,0x0006023f3fb5db1f,0x0007b49775e8fc98,0x0003ad70adbf5045,0x0006e154c178fe98},{0x0006bbb089c20eb0,0x0006df41fb0b9eee,0x00051087ed87e16f,0x000102db5c9fa731,0x000289fef0841861},{0x00016336fed69abf,0x0004f066b929f9ec,0x0004e9ff9e6c5b93,0x00018c89bc4bb2ba,0x0006afbf642a95ca}},
+	{{0x0000de0c62f5d2c1,0x00049601cf734fb5,0x0006b5c38263f0f6,0x0004623ef5b56d06,0x0000db4b851b9503},{0x00055070f913a8cc,0x000765619eac2bbc,0x0003ab5225f47459,0x00076ced14ab5b48,0x00012c093cedb801},{0x00047f9308b8190f,0x000414235c621f82,0x00031f5ff41a5a76,0x0006736773aab96d,0x00033aa8799c6635}},
+	{{0x0007f51ebd085cf2,0x00012cfa67e3f5e1,0x0001800cf1e3d46a,0x00054337615ff0a8,0x000233c6f29e8e21},{0x0000f588fc156cb1,0x000363414da4f069,0x0007296ad9b68aea,0x0004d3711316ae43,0x000212cd0c1c8d58},{0x0004d5107f18c781,0x00064a4fd3a51a5e,0x0004f4cd0448bb37,0x000671d38543151e,0x0001db7778911914}},
+	{{0x000352397c6bc26f,0x00018a7aa0227bbe,0x0005e68cc1ea5f8b,0x0006fe3e3a7a1d5f,0x00031ad97ad26e2a},{0x00014769dd701ab6,0x00028339f1b4b667,0x0004ab214b8ae37b,0x00025f0aefa0b0fe,0x0007ae2ca8a017d2},{0x000017ed0920b962,0x000187e33b53b6fd,0x00055829907a1463,0x000641f248e0a792,0x0001ed1fc53a6622}}
+};
diff --git a/src/ext/ed25519/donna/ed25519-donna-64bit-x86-32bit.h b/src/ext/ed25519/donna/ed25519-donna-64bit-x86-32bit.h
new file mode 100644
index 0000000000..1ce109c5b7
--- /dev/null
+++ b/src/ext/ed25519/donna/ed25519-donna-64bit-x86-32bit.h
@@ -0,0 +1,435 @@
+#if defined(ED25519_GCC_64BIT_32BIT_CHOOSE)
+
+#define HAVE_GE25519_SCALARMULT_BASE_CHOOSE_NIELS
+
+DONNA_NOINLINE static void
+ge25519_scalarmult_base_choose_niels(ge25519_niels *t, const uint8_t table[256][96], uint32_t pos, signed char b) {
+	int64_t breg = (int64_t)b;
+	uint64_t sign = (uint64_t)breg >> 63;
+	uint64_t mask = ~(sign - 1);
+	uint64_t u = (breg + mask) ^ mask;
+
+	__asm__ __volatile__ (
+		/* ysubx+xaddy+t2d */
+		"movq %0, %%rax ;\n"
+		"movd %%rax, %%xmm14 ;\n"
+		"pshufd $0x00, %%xmm14, %%xmm14 ;\n"
+		"pxor %%xmm0, %%xmm0 ;\n"
+		"pxor %%xmm1, %%xmm1 ;\n"
+		"pxor %%xmm2, %%xmm2 ;\n"
+		"pxor %%xmm3, %%xmm3 ;\n"
+		"pxor %%xmm4, %%xmm4 ;\n"
+		"pxor %%xmm5, %%xmm5 ;\n"
+
+		/* 0 */
+		"movq $0, %%rax ;\n"
+		"movd %%rax, %%xmm15 ;\n"
+		"pshufd $0x00, %%xmm15, %%xmm15 ;\n"
+		"pcmpeqd %%xmm14, %%xmm15 ;\n"
+		"movq $1, %%rax ;\n"
+		"movd %%rax, %%xmm6 ;\n"
+		"pxor %%xmm7, %%xmm7 ;\n"
+		"pand %%xmm15, %%xmm6 ;\n"
+		"pand %%xmm15, %%xmm7 ;\n"
+		"por %%xmm6, %%xmm0 ;\n"
+		"por %%xmm7, %%xmm1 ;\n"
+		"por %%xmm6, %%xmm2 ;\n"
+		"por %%xmm7, %%xmm3 ;\n"
+
+		/* 1 */
+		"movq $1, %%rax ;\n"
+		"movd %%rax, %%xmm15 ;\n"
+		"pshufd $0x00, %%xmm15, %%xmm15 ;\n"
+		"pcmpeqd %%xmm14, %%xmm15 ;\n"
+		"movdqa 0(%1), %%xmm6 ;\n"
+		"movdqa 16(%1), %%xmm7 ;\n"
+		"movdqa 32(%1), %%xmm8 ;\n"
+		"movdqa 48(%1), %%xmm9 ;\n"
+		"movdqa 64(%1), %%xmm10 ;\n"
+		"movdqa 80(%1), %%xmm11 ;\n"
+		"pand %%xmm15, %%xmm6 ;\n"
+		"pand %%xmm15, %%xmm7 ;\n"
+		"pand %%xmm15, %%xmm8 ;\n"
+		"pand %%xmm15, %%xmm9 ;\n"
+		"pand %%xmm15, %%xmm10 ;\n"
+		"pand %%xmm15, %%xmm11 ;\n"
+		"por %%xmm6, %%xmm0 ;\n"
+		"por %%xmm7, %%xmm1 ;\n"
+		"por %%xmm8, %%xmm2 ;\n"
+		"por %%xmm9, %%xmm3 ;\n"
+		"por %%xmm10, %%xmm4 ;\n"
+		"por %%xmm11, %%xmm5 ;\n"
+
+		/* 2 */
+		"movq $2, %%rax ;\n"
+		"movd %%rax, %%xmm15 ;\n"
+		"pshufd $0x00, %%xmm15, %%xmm15 ;\n"
+		"pcmpeqd %%xmm14, %%xmm15 ;\n"
+		"movdqa 96(%1), %%xmm6 ;\n"
+		"movdqa 112(%1), %%xmm7 ;\n"
+		"movdqa 128(%1), %%xmm8 ;\n"
+		"movdqa 144(%1), %%xmm9 ;\n"
+		"movdqa 160(%1), %%xmm10 ;\n"
+		"movdqa 176(%1), %%xmm11 ;\n"
+		"pand %%xmm15, %%xmm6 ;\n"
+		"pand %%xmm15, %%xmm7 ;\n"
+		"pand %%xmm15, %%xmm8 ;\n"
+		"pand %%xmm15, %%xmm9 ;\n"
+		"pand %%xmm15, %%xmm10 ;\n"
+		"pand %%xmm15, %%xmm11 ;\n"
+		"por %%xmm6, %%xmm0 ;\n"
+		"por %%xmm7, %%xmm1 ;\n"
+		"por %%xmm8, %%xmm2 ;\n"
+		"por %%xmm9, %%xmm3 ;\n"
+		"por %%xmm10, %%xmm4 ;\n"
+		"por %%xmm11, %%xmm5 ;\n"
+
+		/* 3 */
+		"movq $3, %%rax ;\n"
+		"movd %%rax, %%xmm15 ;\n"
+		"pshufd $0x00, %%xmm15, %%xmm15 ;\n"
+		"pcmpeqd %%xmm14, %%xmm15 ;\n"
+		"movdqa 192(%1), %%xmm6 ;\n"
+		"movdqa 208(%1), %%xmm7 ;\n"
+		"movdqa 224(%1), %%xmm8 ;\n"
+		"movdqa 240(%1), %%xmm9 ;\n"
+		"movdqa 256(%1), %%xmm10 ;\n"
+		"movdqa 272(%1), %%xmm11 ;\n"
+		"pand %%xmm15, %%xmm6 ;\n"
+		"pand %%xmm15, %%xmm7 ;\n"
+		"pand %%xmm15, %%xmm8 ;\n"
+		"pand %%xmm15, %%xmm9 ;\n"
+		"pand %%xmm15, %%xmm10 ;\n"
+		"pand %%xmm15, %%xmm11 ;\n"
+		"por %%xmm6, %%xmm0 ;\n"
+		"por %%xmm7, %%xmm1 ;\n"
+		"por %%xmm8, %%xmm2 ;\n"
+		"por %%xmm9, %%xmm3 ;\n"
+		"por %%xmm10, %%xmm4 ;\n"
+		"por %%xmm11, %%xmm5 ;\n"
+
+		/* 4 */
+		"movq $4, %%rax ;\n"
+		"movd %%rax, %%xmm15 ;\n"
+		"pshufd $0x00, %%xmm15, %%xmm15 ;\n"
+		"pcmpeqd %%xmm14, %%xmm15 ;\n"
+		"movdqa 288(%1), %%xmm6 ;\n"
+		"movdqa 304(%1), %%xmm7 ;\n"
+		"movdqa 320(%1), %%xmm8 ;\n"
+		"movdqa 336(%1), %%xmm9 ;\n"
+		"movdqa 352(%1), %%xmm10 ;\n"
+		"movdqa 368(%1), %%xmm11 ;\n"
+		"pand %%xmm15, %%xmm6 ;\n"
+		"pand %%xmm15, %%xmm7 ;\n"
+		"pand %%xmm15, %%xmm8 ;\n"
+		"pand %%xmm15, %%xmm9 ;\n"
+		"pand %%xmm15, %%xmm10 ;\n"
+		"pand %%xmm15, %%xmm11 ;\n"
+		"por %%xmm6, %%xmm0 ;\n"
+		"por %%xmm7, %%xmm1 ;\n"
+		"por %%xmm8, %%xmm2 ;\n"
+		"por %%xmm9, %%xmm3 ;\n"
+		"por %%xmm10, %%xmm4 ;\n"
+		"por %%xmm11, %%xmm5 ;\n"
+
+		/* 5 */
+		"movq $5, %%rax ;\n"
+		"movd %%rax, %%xmm15 ;\n"
+		"pshufd $0x00, %%xmm15, %%xmm15 ;\n"
+		"pcmpeqd %%xmm14, %%xmm15 ;\n"
+		"movdqa 384(%1), %%xmm6 ;\n"
+		"movdqa 400(%1), %%xmm7 ;\n"
+		"movdqa 416(%1), %%xmm8 ;\n"
+		"movdqa 432(%1), %%xmm9 ;\n"
+		"movdqa 448(%1), %%xmm10 ;\n"
+		"movdqa 464(%1), %%xmm11 ;\n"
+		"pand %%xmm15, %%xmm6 ;\n"
+		"pand %%xmm15, %%xmm7 ;\n"
+		"pand %%xmm15, %%xmm8 ;\n"
+		"pand %%xmm15, %%xmm9 ;\n"
+		"pand %%xmm15, %%xmm10 ;\n"
+		"pand %%xmm15, %%xmm11 ;\n"
+		"por %%xmm6, %%xmm0 ;\n"
+		"por %%xmm7, %%xmm1 ;\n"
+		"por %%xmm8, %%xmm2 ;\n"
+		"por %%xmm9, %%xmm3 ;\n"
+		"por %%xmm10, %%xmm4 ;\n"
+		"por %%xmm11, %%xmm5 ;\n"
+
+		/* 6 */
+		"movq $6, %%rax ;\n"
+		"movd %%rax, %%xmm15 ;\n"
+		"pshufd $0x00, %%xmm15, %%xmm15 ;\n"
+		"pcmpeqd %%xmm14, %%xmm15 ;\n"
+		"movdqa 480(%1), %%xmm6 ;\n"
+		"movdqa 496(%1), %%xmm7 ;\n"
+		"movdqa 512(%1), %%xmm8 ;\n"
+		"movdqa 528(%1), %%xmm9 ;\n"
+		"movdqa 544(%1), %%xmm10 ;\n"
+		"movdqa 560(%1), %%xmm11 ;\n"
+		"pand %%xmm15, %%xmm6 ;\n"
+		"pand %%xmm15, %%xmm7 ;\n"
+		"pand %%xmm15, %%xmm8 ;\n"
+		"pand %%xmm15, %%xmm9 ;\n"
+		"pand %%xmm15, %%xmm10 ;\n"
+		"pand %%xmm15, %%xmm11 ;\n"
+		"por %%xmm6, %%xmm0 ;\n"
+		"por %%xmm7, %%xmm1 ;\n"
+		"por %%xmm8, %%xmm2 ;\n"
+		"por %%xmm9, %%xmm3 ;\n"
+		"por %%xmm10, %%xmm4 ;\n"
+		"por %%xmm11, %%xmm5 ;\n"
+
+		/* 7 */
+		"movq $7, %%rax ;\n"
+		"movd %%rax, %%xmm15 ;\n"
+		"pshufd $0x00, %%xmm15, %%xmm15 ;\n"
+		"pcmpeqd %%xmm14, %%xmm15 ;\n"
+		"movdqa 576(%1), %%xmm6 ;\n"
+		"movdqa 592(%1), %%xmm7 ;\n"
+		"movdqa 608(%1), %%xmm8 ;\n"
+		"movdqa 624(%1), %%xmm9 ;\n"
+		"movdqa 640(%1), %%xmm10 ;\n"
+		"movdqa 656(%1), %%xmm11 ;\n"
+		"pand %%xmm15, %%xmm6 ;\n"
+		"pand %%xmm15, %%xmm7 ;\n"
+		"pand %%xmm15, %%xmm8 ;\n"
+		"pand %%xmm15, %%xmm9 ;\n"
+		"pand %%xmm15, %%xmm10 ;\n"
+		"pand %%xmm15, %%xmm11 ;\n"
+		"por %%xmm6, %%xmm0 ;\n"
+		"por %%xmm7, %%xmm1 ;\n"
+		"por %%xmm8, %%xmm2 ;\n"
+		"por %%xmm9, %%xmm3 ;\n"
+		"por %%xmm10, %%xmm4 ;\n"
+		"por %%xmm11, %%xmm5 ;\n"
+
+		/* 8 */
+		"movq $8, %%rax ;\n"
+		"movd %%rax, %%xmm15 ;\n"
+		"pshufd $0x00, %%xmm15, %%xmm15 ;\n"
+		"pcmpeqd %%xmm14, %%xmm15 ;\n"
+		"movdqa 672(%1), %%xmm6 ;\n"
+		"movdqa 688(%1), %%xmm7 ;\n"
+		"movdqa 704(%1), %%xmm8 ;\n"
+		"movdqa 720(%1), %%xmm9 ;\n"
+		"movdqa 736(%1), %%xmm10 ;\n"
+		"movdqa 752(%1), %%xmm11 ;\n"
+		"pand %%xmm15, %%xmm6 ;\n"
+		"pand %%xmm15, %%xmm7 ;\n"
+		"pand %%xmm15, %%xmm8 ;\n"
+		"pand %%xmm15, %%xmm9 ;\n"
+		"pand %%xmm15, %%xmm10 ;\n"
+		"pand %%xmm15, %%xmm11 ;\n"
+		"por %%xmm6, %%xmm0 ;\n"
+		"por %%xmm7, %%xmm1 ;\n"
+		"por %%xmm8, %%xmm2 ;\n"
+		"por %%xmm9, %%xmm3 ;\n"
+		"por %%xmm10, %%xmm4 ;\n"
+		"por %%xmm11, %%xmm5 ;\n"
+
+		/* conditionally swap ysubx and xaddy */
+		"movq %3, %%rax ;\n"
+		"xorq $1, %%rax ;\n"
+		"movd %%rax, %%xmm14 ;\n"
+		"pxor %%xmm15, %%xmm15 ;\n"
+		"pshufd $0x00, %%xmm14, %%xmm14 ;\n"
+		"pxor %%xmm0, %%xmm2 ;\n"
+		"pxor %%xmm1, %%xmm3 ;\n"
+		"pcmpeqd %%xmm14, %%xmm15 ;\n"
+		"movdqa %%xmm2, %%xmm6 ;\n"
+		"movdqa %%xmm3, %%xmm7 ;\n"
+		"pand %%xmm15, %%xmm6 ;\n"
+		"pand %%xmm15, %%xmm7 ;\n"
+		"pxor %%xmm6, %%xmm0 ;\n"
+		"pxor %%xmm7, %%xmm1 ;\n"
+		"pxor %%xmm0, %%xmm2 ;\n"
+		"pxor %%xmm1, %%xmm3 ;\n"
+
+		/* store ysubx */
+		"xorq %%rax, %%rax ;\n"
+		"movd %%xmm0, %%rcx ;\n"
+		"movd %%xmm0, %%r8 ;\n"
+		"movd %%xmm1, %%rsi ;\n"
+		"pshufd $0xee, %%xmm0, %%xmm0 ;\n"
+		"pshufd $0xee, %%xmm1, %%xmm1 ;\n"
+		"movd %%xmm0, %%rdx ;\n"
+		"movd %%xmm1, %%rdi ;\n"
+		"shrdq $51, %%rdx, %%r8 ;\n"
+		"shrdq $38, %%rsi, %%rdx ;\n"
+		"shrdq $25, %%rdi, %%rsi ;\n"
+		"shrq $12, %%rdi ;\n"
+		"movq %%rcx, %%r9 ;\n"
+		"movq %%r8, %%r10 ;\n"
+		"movq %%rdx, %%r11 ;\n"
+		"movq %%rsi, %%r12 ;\n"
+		"movq %%rdi, %%r13 ;\n"
+		"shrq $26, %%r9 ;\n"
+		"shrq $26, %%r10 ;\n"
+		"shrq $26, %%r11 ;\n"
+		"shrq $26, %%r12 ;\n"
+		"shrq $26, %%r13 ;\n"
+		"andl $0x3ffffff, %%ecx ;\n"
+		"andl $0x1ffffff, %%r9d ;\n"
+		"andl $0x3ffffff, %%r8d ;\n"
+		"andl $0x1ffffff, %%r10d ;\n"
+		"andl $0x3ffffff, %%edx ;\n"
+		"andl $0x1ffffff, %%r11d ;\n"
+		"andl $0x3ffffff, %%esi ;\n"
+		"andl $0x1ffffff, %%r12d ;\n"
+		"andl $0x3ffffff, %%edi ;\n"
+		"andl $0x1ffffff, %%r13d ;\n"
+		"movl %%ecx, 0(%2) ;\n"
+		"movl %%r9d, 4(%2) ;\n"
+		"movl %%r8d, 8(%2) ;\n"
+		"movl %%r10d, 12(%2) ;\n"
+		"movl %%edx, 16(%2) ;\n"
+		"movl %%r11d, 20(%2) ;\n"
+		"movl %%esi, 24(%2) ;\n"
+		"movl %%r12d, 28(%2) ;\n"
+		"movl %%edi, 32(%2) ;\n"
+		"movl %%r13d, 36(%2) ;\n"
+
+		/* store xaddy */
+		"movd %%xmm2, %%rcx ;\n"
+		"movd %%xmm2, %%r8 ;\n"
+		"movd %%xmm3, %%rsi ;\n"
+		"pshufd $0xee, %%xmm2, %%xmm2 ;\n"
+		"pshufd $0xee, %%xmm3, %%xmm3 ;\n"
+		"movd %%xmm2, %%rdx ;\n"
+		"movd %%xmm3, %%rdi ;\n"
+		"shrdq $51, %%rdx, %%r8 ;\n"
+		"shrdq $38, %%rsi, %%rdx ;\n"
+		"shrdq $25, %%rdi, %%rsi ;\n"
+		"shrq $12, %%rdi ;\n"
+		"movq %%rcx, %%r9 ;\n"
+		"movq %%r8, %%r10 ;\n"
+		"movq %%rdx, %%r11 ;\n"
+		"movq %%rsi, %%r12 ;\n"
+		"movq %%rdi, %%r13 ;\n"
+		"shrq $26, %%r9 ;\n"
+		"shrq $26, %%r10 ;\n"
+		"shrq $26, %%r11 ;\n"
+		"shrq $26, %%r12 ;\n"
+		"shrq $26, %%r13 ;\n"
+		"andl $0x3ffffff, %%ecx ;\n"
+		"andl $0x1ffffff, %%r9d ;\n"
+		"andl $0x3ffffff, %%r8d ;\n"
+		"andl $0x1ffffff, %%r10d ;\n"
+		"andl $0x3ffffff, %%edx ;\n"
+		"andl $0x1ffffff, %%r11d ;\n"
+		"andl $0x3ffffff, %%esi ;\n"
+		"andl $0x1ffffff, %%r12d ;\n"
+		"andl $0x3ffffff, %%edi ;\n"
+		"andl $0x1ffffff, %%r13d ;\n"
+		"movl %%ecx, 40(%2) ;\n"
+		"movl %%r9d, 44(%2) ;\n"
+		"movl %%r8d, 48(%2) ;\n"
+		"movl %%r10d, 52(%2) ;\n"
+		"movl %%edx, 56(%2) ;\n"
+		"movl %%r11d, 60(%2) ;\n"
+		"movl %%esi, 64(%2) ;\n"
+		"movl %%r12d, 68(%2) ;\n"
+		"movl %%edi, 72(%2) ;\n"
+		"movl %%r13d, 76(%2) ;\n"
+
+		/* extract t2d */
+		"xorq %%rax, %%rax ;\n"
+		"movd %%xmm4, %%rcx ;\n"
+		"movd %%xmm4, %%r8 ;\n"
+		"movd %%xmm5, %%rsi ;\n"
+		"pshufd $0xee, %%xmm4, %%xmm4 ;\n"
+		"pshufd $0xee, %%xmm5, %%xmm5 ;\n"
+		"movd %%xmm4, %%rdx ;\n"
+		"movd %%xmm5, %%rdi ;\n"
+		"shrdq $51, %%rdx, %%r8 ;\n"
+		"shrdq $38, %%rsi, %%rdx ;\n"
+		"shrdq $25, %%rdi, %%rsi ;\n"
+		"shrq $12, %%rdi ;\n"
+		"movq %%rcx, %%r9 ;\n"
+		"movq %%r8, %%r10 ;\n"
+		"movq %%rdx, %%r11 ;\n"
+		"movq %%rsi, %%r12 ;\n"
+		"movq %%rdi, %%r13 ;\n"
+		"shrq $26, %%r9 ;\n"
+		"shrq $26, %%r10 ;\n"
+		"shrq $26, %%r11 ;\n"
+		"shrq $26, %%r12 ;\n"
+		"shrq $26, %%r13 ;\n"
+		"andl $0x3ffffff, %%ecx ;\n"
+		"andl $0x1ffffff, %%r9d ;\n"
+		"andl $0x3ffffff, %%r8d ;\n"
+		"andl $0x1ffffff, %%r10d ;\n"
+		"andl $0x3ffffff, %%edx ;\n"
+		"andl $0x1ffffff, %%r11d ;\n"
+		"andl $0x3ffffff, %%esi ;\n"
+		"andl $0x1ffffff, %%r12d ;\n"
+		"andl $0x3ffffff, %%edi ;\n"
+		"andl $0x1ffffff, %%r13d ;\n"
+		"movd %%ecx, %%xmm0 ;\n"
+		"movd %%r9d, %%xmm4 ;\n"
+		"movd %%r8d, %%xmm8 ;\n"
+		"movd %%r10d, %%xmm3 ;\n"
+		"movd %%edx, %%xmm1 ;\n"
+		"movd %%r11d, %%xmm5 ;\n"
+		"movd %%esi, %%xmm6 ;\n"
+		"movd %%r12d, %%xmm7 ;\n"
+		"movd %%edi, %%xmm2 ;\n"
+		"movd %%r13d, %%xmm9 ;\n"
+		"punpckldq %%xmm4, %%xmm0 ;\n"
+		"punpckldq %%xmm3, %%xmm8 ;\n"
+		"punpckldq %%xmm5, %%xmm1 ;\n"
+		"punpckldq %%xmm7, %%xmm6 ;\n"
+		"punpckldq %%xmm9, %%xmm2 ;\n"
+		"punpcklqdq %%xmm8, %%xmm0 ;\n"
+		"punpcklqdq %%xmm6, %%xmm1 ;\n"
+
+		/* set up 2p in to 3/4 */
+		"movl $0x7ffffda, %%ecx ;\n"
+		"movl $0x3fffffe, %%edx ;\n"
+		"movl $0x7fffffe, %%eax ;\n"
+		"movd %%ecx, %%xmm3 ;\n"
+		"movd %%edx, %%xmm5 ;\n"
+		"movd %%eax, %%xmm4 ;\n"
+		"punpckldq %%xmm5, %%xmm3 ;\n"
+		"punpckldq %%xmm5, %%xmm4 ;\n"
+		"punpcklqdq %%xmm4, %%xmm3 ;\n"
+		"movdqa %%xmm4, %%xmm5 ;\n"
+		"punpcklqdq %%xmm4, %%xmm4 ;\n"
+
+		/* subtract and conditionally move */
+		"movl %3, %%ecx ;\n"
+		"sub $1, %%ecx ;\n"
+		"movd %%ecx, %%xmm6 ;\n"
+		"pshufd $0x00, %%xmm6, %%xmm6 ;\n"
+		"movdqa %%xmm6, %%xmm7 ;\n"
+		"psubd %%xmm0, %%xmm3 ;\n"
+		"psubd %%xmm1, %%xmm4 ;\n"
+		"psubd %%xmm2, %%xmm5 ;\n"
+		"pand %%xmm6, %%xmm0 ;\n"
+		"pand %%xmm6, %%xmm1 ;\n"
+		"pand %%xmm6, %%xmm2 ;\n"
+		"pandn %%xmm3, %%xmm6 ;\n"
+		"movdqa %%xmm7, %%xmm3 ;\n"
+		"pandn %%xmm4, %%xmm7 ;\n"
+		"pandn %%xmm5, %%xmm3 ;\n"
+		"por %%xmm6, %%xmm0 ;\n"
+		"por %%xmm7, %%xmm1 ;\n"
+		"por %%xmm3, %%xmm2 ;\n"
+
+		/* store t2d */
+		"movdqa %%xmm0, 80(%2) ;\n"
+		"movdqa %%xmm1, 96(%2) ;\n"
+		"movd %%xmm2, %%rax ;\n"
+		"movq %%rax, 112(%2) ;\n"
+		:
+		: "m"(u), "r"(&table[pos * 8]), "r"(t), "m"(sign) /* %0 = u, %1 = table, %2 = t, %3 = sign */
+		:
+			"%rax", "%rcx", "%rdx", "%rdi", "%rsi", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13",
+			"%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm14", "%xmm15", /* xmm15 is a scratch mask above; was wrongly listed as xmm14 twice */
+			"cc", "memory"
+	);
+}
+
+#endif /* defined(ED25519_GCC_64BIT_32BIT_CHOOSE) */
+
diff --git a/src/ext/ed25519/donna/ed25519-donna-64bit-x86.h b/src/ext/ed25519/donna/ed25519-donna-64bit-x86.h
new file mode 100644
index 0000000000..30bd472762
--- /dev/null
+++ b/src/ext/ed25519/donna/ed25519-donna-64bit-x86.h
@@ -0,0 +1,351 @@
+#if defined(ED25519_GCC_64BIT_X86_CHOOSE)
+
+#define HAVE_GE25519_SCALARMULT_BASE_CHOOSE_NIELS
+
+DONNA_NOINLINE static void
+ge25519_scalarmult_base_choose_niels(ge25519_niels *t, const uint8_t table[256][96], uint32_t pos, signed char b) {
+	int64_t breg = (int64_t)b;
+	uint64_t sign = (uint64_t)breg >> 63;
+	uint64_t mask = ~(sign - 1);
+	uint64_t u = (breg + mask) ^ mask;
+
+	__asm__ __volatile__ (
+		/* ysubx+xaddy+t2d */
+		"movq %0, %%rax ;\n"
+		"movd %%rax, %%xmm14 ;\n"
+		"pshufd $0x00, %%xmm14, %%xmm14 ;\n"
+		"pxor %%xmm0, %%xmm0 ;\n"
+		"pxor %%xmm1, %%xmm1 ;\n"
+		"pxor %%xmm2, %%xmm2 ;\n"
+		"pxor %%xmm3, %%xmm3 ;\n"
+		"pxor %%xmm4, %%xmm4 ;\n"
+		"pxor %%xmm5, %%xmm5 ;\n"
+
+		/* 0 */
+		"movq $0, %%rax ;\n"
+		"movd %%rax, %%xmm15 ;\n"
+		"pshufd $0x00, %%xmm15, %%xmm15 ;\n"
+		"pcmpeqd %%xmm14, %%xmm15 ;\n"
+		"movq $1, %%rax ;\n"
+		"movd %%rax, %%xmm6 ;\n"
+		"pxor %%xmm7, %%xmm7 ;\n"
+		"pand %%xmm15, %%xmm6 ;\n"
+		"pand %%xmm15, %%xmm7 ;\n"
+		"por %%xmm6, %%xmm0 ;\n"
+		"por %%xmm7, %%xmm1 ;\n"
+		"por %%xmm6, %%xmm2 ;\n"
+		"por %%xmm7, %%xmm3 ;\n"
+
+		/* 1 */
+		"movq $1, %%rax ;\n"
+		"movd %%rax, %%xmm15 ;\n"
+		"pshufd $0x00, %%xmm15, %%xmm15 ;\n"
+		"pcmpeqd %%xmm14, %%xmm15 ;\n"
+		"movdqa 0(%1), %%xmm6 ;\n"
+		"movdqa 16(%1), %%xmm7 ;\n"
+		"movdqa 32(%1), %%xmm8 ;\n"
+		"movdqa 48(%1), %%xmm9 ;\n"
+		"movdqa 64(%1), %%xmm10 ;\n"
+		"movdqa 80(%1), %%xmm11 ;\n"
+		"pand %%xmm15, %%xmm6 ;\n"
+		"pand %%xmm15, %%xmm7 ;\n"
+		"pand %%xmm15, %%xmm8 ;\n"
+		"pand %%xmm15, %%xmm9 ;\n"
+		"pand %%xmm15, %%xmm10 ;\n"
+		"pand %%xmm15, %%xmm11 ;\n"
+		"por %%xmm6, %%xmm0 ;\n"
+		"por %%xmm7, %%xmm1 ;\n"
+		"por %%xmm8, %%xmm2 ;\n"
+		"por %%xmm9, %%xmm3 ;\n"
+		"por %%xmm10, %%xmm4 ;\n"
+		"por %%xmm11, %%xmm5 ;\n"
+
+		/* 2 */
+		"movq $2, %%rax ;\n"
+		"movd %%rax, %%xmm15 ;\n"
+		"pshufd $0x00, %%xmm15, %%xmm15 ;\n"
+		"pcmpeqd %%xmm14, %%xmm15 ;\n"
+		"movdqa 96(%1), %%xmm6 ;\n"
+		"movdqa 112(%1), %%xmm7 ;\n"
+		"movdqa 128(%1), %%xmm8 ;\n"
+		"movdqa 144(%1), %%xmm9 ;\n"
+		"movdqa 160(%1), %%xmm10 ;\n"
+		"movdqa 176(%1), %%xmm11 ;\n"
+		"pand %%xmm15, %%xmm6 ;\n"
+		"pand %%xmm15, %%xmm7 ;\n"
+		"pand %%xmm15, %%xmm8 ;\n"
+		"pand %%xmm15, %%xmm9 ;\n"
+		"pand %%xmm15, %%xmm10 ;\n"
+		"pand %%xmm15, %%xmm11 ;\n"
+		"por %%xmm6, %%xmm0 ;\n"
+		"por %%xmm7, %%xmm1 ;\n"
+		"por %%xmm8, %%xmm2 ;\n"
+		"por %%xmm9, %%xmm3 ;\n"
+		"por %%xmm10, %%xmm4 ;\n"
+		"por %%xmm11, %%xmm5 ;\n"
+
+		/* 3 */
+		"movq $3, %%rax ;\n"
+		"movd %%rax, %%xmm15 ;\n"
+		"pshufd $0x00, %%xmm15, %%xmm15 ;\n"
+		"pcmpeqd %%xmm14, %%xmm15 ;\n"
+		"movdqa 192(%1), %%xmm6 ;\n"
+		"movdqa 208(%1), %%xmm7 ;\n"
+		"movdqa 224(%1), %%xmm8 ;\n"
+		"movdqa 240(%1), %%xmm9 ;\n"
+		"movdqa 256(%1), %%xmm10 ;\n"
+		"movdqa 272(%1), %%xmm11 ;\n"
+		"pand %%xmm15, %%xmm6 ;\n"
+		"pand %%xmm15, %%xmm7 ;\n"
+		"pand %%xmm15, %%xmm8 ;\n"
+		"pand %%xmm15, %%xmm9 ;\n"
+		"pand %%xmm15, %%xmm10 ;\n"
+		"pand %%xmm15, %%xmm11 ;\n"
+		"por %%xmm6, %%xmm0 ;\n"
+		"por %%xmm7, %%xmm1 ;\n"
+		"por %%xmm8, %%xmm2 ;\n"
+		"por %%xmm9, %%xmm3 ;\n"
+		"por %%xmm10, %%xmm4 ;\n"
+		"por %%xmm11, %%xmm5 ;\n"
+
+		/* 4 */
+		"movq $4, %%rax ;\n"
+		"movd %%rax, %%xmm15 ;\n"
+		"pshufd $0x00, %%xmm15, %%xmm15 ;\n"
+		"pcmpeqd %%xmm14, %%xmm15 ;\n"
+		"movdqa 288(%1), %%xmm6 ;\n"
+		"movdqa 304(%1), %%xmm7 ;\n"
+		"movdqa 320(%1), %%xmm8 ;\n"
+		"movdqa 336(%1), %%xmm9 ;\n"
+		"movdqa 352(%1), %%xmm10 ;\n"
+		"movdqa 368(%1), %%xmm11 ;\n"
+		"pand %%xmm15, %%xmm6 ;\n"
+		"pand %%xmm15, %%xmm7 ;\n"
+		"pand %%xmm15, %%xmm8 ;\n"
+		"pand %%xmm15, %%xmm9 ;\n"
+		"pand %%xmm15, %%xmm10 ;\n"
+		"pand %%xmm15, %%xmm11 ;\n"
+		"por %%xmm6, %%xmm0 ;\n"
+		"por %%xmm7, %%xmm1 ;\n"
+		"por %%xmm8, %%xmm2 ;\n"
+		"por %%xmm9, %%xmm3 ;\n"
+		"por %%xmm10, %%xmm4 ;\n"
+		"por %%xmm11, %%xmm5 ;\n"
+
+		/* 5 */
+		"movq $5, %%rax ;\n"
+		"movd %%rax, %%xmm15 ;\n"
+		"pshufd $0x00, %%xmm15, %%xmm15 ;\n"
+		"pcmpeqd %%xmm14, %%xmm15 ;\n"
+		"movdqa 384(%1), %%xmm6 ;\n"
+		"movdqa 400(%1), %%xmm7 ;\n"
+		"movdqa 416(%1), %%xmm8 ;\n"
+		"movdqa 432(%1), %%xmm9 ;\n"
+		"movdqa 448(%1), %%xmm10 ;\n"
+		"movdqa 464(%1), %%xmm11 ;\n"
+		"pand %%xmm15, %%xmm6 ;\n"
+		"pand %%xmm15, %%xmm7 ;\n"
+		"pand %%xmm15, %%xmm8 ;\n"
+		"pand %%xmm15, %%xmm9 ;\n"
+		"pand %%xmm15, %%xmm10 ;\n"
+		"pand %%xmm15, %%xmm11 ;\n"
+		"por %%xmm6, %%xmm0 ;\n"
+		"por %%xmm7, %%xmm1 ;\n"
+		"por %%xmm8, %%xmm2 ;\n"
+		"por %%xmm9, %%xmm3 ;\n"
+		"por %%xmm10, %%xmm4 ;\n"
+		"por %%xmm11, %%xmm5 ;\n"
+
+		/* 6 */
+		"movq $6, %%rax ;\n"
+		"movd %%rax, %%xmm15 ;\n"
+		"pshufd $0x00, %%xmm15, %%xmm15 ;\n"
+		"pcmpeqd %%xmm14, %%xmm15 ;\n"
+		"movdqa 480(%1), %%xmm6 ;\n"
+		"movdqa 496(%1), %%xmm7 ;\n"
+		"movdqa 512(%1), %%xmm8 ;\n"
+		"movdqa 528(%1), %%xmm9 ;\n"
+		"movdqa 544(%1), %%xmm10 ;\n"
+		"movdqa 560(%1), %%xmm11 ;\n"
+		"pand %%xmm15, %%xmm6 ;\n"
+		"pand %%xmm15, %%xmm7 ;\n"
+		"pand %%xmm15, %%xmm8 ;\n"
+		"pand %%xmm15, %%xmm9 ;\n"
+		"pand %%xmm15, %%xmm10 ;\n"
+		"pand %%xmm15, %%xmm11 ;\n"
+		"por %%xmm6, %%xmm0 ;\n"
+		"por %%xmm7, %%xmm1 ;\n"
+		"por %%xmm8, %%xmm2 ;\n"
+		"por %%xmm9, %%xmm3 ;\n"
+		"por %%xmm10, %%xmm4 ;\n"
+		"por %%xmm11, %%xmm5 ;\n"
+
+		/* 7 */
+		"movq $7, %%rax ;\n"
+		"movd %%rax, %%xmm15 ;\n"
+		"pshufd $0x00, %%xmm15, %%xmm15 ;\n"
+		"pcmpeqd %%xmm14, %%xmm15 ;\n"
+		"movdqa 576(%1), %%xmm6 ;\n"
+		"movdqa 592(%1), %%xmm7 ;\n"
+		"movdqa 608(%1), %%xmm8 ;\n"
+		"movdqa 624(%1), %%xmm9 ;\n"
+		"movdqa 640(%1), %%xmm10 ;\n"
+		"movdqa 656(%1), %%xmm11 ;\n"
+		"pand %%xmm15, %%xmm6 ;\n"
+		"pand %%xmm15, %%xmm7 ;\n"
+		"pand %%xmm15, %%xmm8 ;\n"
+		"pand %%xmm15, %%xmm9 ;\n"
+		"pand %%xmm15, %%xmm10 ;\n"
+		"pand %%xmm15, %%xmm11 ;\n"
+		"por %%xmm6, %%xmm0 ;\n"
+		"por %%xmm7, %%xmm1 ;\n"
+		"por %%xmm8, %%xmm2 ;\n"
+		"por %%xmm9, %%xmm3 ;\n"
+		"por %%xmm10, %%xmm4 ;\n"
+		"por %%xmm11, %%xmm5 ;\n"
+
+		/* 8 */
+		"movq $8, %%rax ;\n"
+		"movd %%rax, %%xmm15 ;\n"
+		"pshufd $0x00, %%xmm15, %%xmm15 ;\n"
+		"pcmpeqd %%xmm14, %%xmm15 ;\n"
+		"movdqa 672(%1), %%xmm6 ;\n"
+		"movdqa 688(%1), %%xmm7 ;\n"
+		"movdqa 704(%1), %%xmm8 ;\n"
+		"movdqa 720(%1), %%xmm9 ;\n"
+		"movdqa 736(%1), %%xmm10 ;\n"
+		"movdqa 752(%1), %%xmm11 ;\n"
+		"pand %%xmm15, %%xmm6 ;\n"
+		"pand %%xmm15, %%xmm7 ;\n"
+		"pand %%xmm15, %%xmm8 ;\n"
+		"pand %%xmm15, %%xmm9 ;\n"
+		"pand %%xmm15, %%xmm10 ;\n"
+		"pand %%xmm15, %%xmm11 ;\n"
+		"por %%xmm6, %%xmm0 ;\n"
+		"por %%xmm7, %%xmm1 ;\n"
+		"por %%xmm8, %%xmm2 ;\n"
+		"por %%xmm9, %%xmm3 ;\n"
+		"por %%xmm10, %%xmm4 ;\n"
+		"por %%xmm11, %%xmm5 ;\n"
+
+		/* conditionally swap ysubx and xaddy */
+		"movq %3, %%rax ;\n"
+		"xorq $1, %%rax ;\n"
+		"movd %%rax, %%xmm14 ;\n"
+		"pxor %%xmm15, %%xmm15 ;\n"
+		"pshufd $0x00, %%xmm14, %%xmm14 ;\n"
+		"pxor %%xmm0, %%xmm2 ;\n"
+		"pxor %%xmm1, %%xmm3 ;\n"
+		"pcmpeqd %%xmm14, %%xmm15 ;\n"
+		"movdqa %%xmm2, %%xmm6 ;\n"
+		"movdqa %%xmm3, %%xmm7 ;\n"
+		"pand %%xmm15, %%xmm6 ;\n"
+		"pand %%xmm15, %%xmm7 ;\n"
+		"pxor %%xmm6, %%xmm0 ;\n"
+		"pxor %%xmm7, %%xmm1 ;\n"
+		"pxor %%xmm0, %%xmm2 ;\n"
+		"pxor %%xmm1, %%xmm3 ;\n"
+
+		/* store ysubx */
+		"movq $0x7ffffffffffff, %%rax ;\n"
+		"movd %%xmm0, %%rcx ;\n"
+		"movd %%xmm0, %%r8 ;\n"
+		"movd %%xmm1, %%rsi ;\n"
+		"pshufd $0xee, %%xmm0, %%xmm0 ;\n"
+		"pshufd $0xee, %%xmm1, %%xmm1 ;\n"
+		"movd %%xmm0, %%rdx ;\n"
+		"movd %%xmm1, %%rdi ;\n"
+		"shrdq $51, %%rdx, %%r8 ;\n"
+		"shrdq $38, %%rsi, %%rdx ;\n"
+		"shrdq $25, %%rdi, %%rsi ;\n"
+		"shrq $12, %%rdi ;\n"
+		"andq %%rax, %%rcx ;\n"
+		"andq %%rax, %%r8 ;\n"
+		"andq %%rax, %%rdx ;\n"
+		"andq %%rax, %%rsi ;\n"
+		"andq %%rax, %%rdi ;\n"
+		"movq %%rcx, 0(%2) ;\n"
+		"movq %%r8, 8(%2) ;\n"
+		"movq %%rdx, 16(%2) ;\n"
+		"movq %%rsi, 24(%2) ;\n"
+		"movq %%rdi, 32(%2) ;\n"
+
+		/* store xaddy */
+		"movq $0x7ffffffffffff, %%rax ;\n"
+		"movd %%xmm2, %%rcx ;\n"
+		"movd %%xmm2, %%r8 ;\n"
+		"movd %%xmm3, %%rsi ;\n"
+		"pshufd $0xee, %%xmm2, %%xmm2 ;\n"
+		"pshufd $0xee, %%xmm3, %%xmm3 ;\n"
+		"movd %%xmm2, %%rdx ;\n"
+		"movd %%xmm3, %%rdi ;\n"
+		"shrdq $51, %%rdx, %%r8 ;\n"
+		"shrdq $38, %%rsi, %%rdx ;\n"
+		"shrdq $25, %%rdi, %%rsi ;\n"
+		"shrq $12, %%rdi ;\n"
+		"andq %%rax, %%rcx ;\n"
+		"andq %%rax, %%r8 ;\n"
+		"andq %%rax, %%rdx ;\n"
+		"andq %%rax, %%rsi ;\n"
+		"andq %%rax, %%rdi ;\n"
+		"movq %%rcx, 40(%2) ;\n"
+		"movq %%r8, 48(%2) ;\n"
+		"movq %%rdx, 56(%2) ;\n"
+		"movq %%rsi, 64(%2) ;\n"
+		"movq %%rdi, 72(%2) ;\n"
+
+		/* extract t2d */
+		"movq $0x7ffffffffffff, %%rax ;\n"
+		"movd %%xmm4, %%rcx ;\n"
+		"movd %%xmm4, %%r8 ;\n"
+		"movd %%xmm5, %%rsi ;\n"
+		"pshufd $0xee, %%xmm4, %%xmm4 ;\n"
+		"pshufd $0xee, %%xmm5, %%xmm5 ;\n"
+		"movd %%xmm4, %%rdx ;\n"
+		"movd %%xmm5, %%rdi ;\n"
+		"shrdq $51, %%rdx, %%r8 ;\n"
+		"shrdq $38, %%rsi, %%rdx ;\n"
+		"shrdq $25, %%rdi, %%rsi ;\n"
+		"shrq $12, %%rdi ;\n"
+		"andq %%rax, %%rcx ;\n"
+		"andq %%rax, %%r8 ;\n"
+		"andq %%rax, %%rdx ;\n"
+		"andq %%rax, %%rsi ;\n"
+		"andq %%rax, %%rdi ;\n"
+
+		/* conditionally negate t2d */
+		"movq %3, %%rax ;\n"
+		"movq $0xfffffffffffda, %%r9 ;\n"
+		"movq $0xffffffffffffe, %%r10 ;\n"
+		"movq %%r10, %%r11 ;\n"
+		"movq %%r10, %%r12 ;\n"
+		"movq %%r10, %%r13 ;\n"
+		"subq %%rcx, %%r9 ;\n"
+		"subq %%r8, %%r10 ;\n"
+		"subq %%rdx, %%r11 ;\n"
+		"subq %%rsi, %%r12 ;\n"
+		"subq %%rdi, %%r13 ;\n"
+		"cmpq $1, %%rax ;\n"
+		"cmove %%r9, %%rcx ;\n"
+		"cmove %%r10, %%r8 ;\n"
+		"cmove %%r11, %%rdx ;\n"
+		"cmove %%r12, %%rsi ;\n"
+		"cmove %%r13, %%rdi ;\n"
+
+		/* store t2d */
+		"movq %%rcx, 80(%2) ;\n"
+		"movq %%r8, 88(%2) ;\n"
+		"movq %%rdx, 96(%2) ;\n"
+		"movq %%rsi, 104(%2) ;\n"
+		"movq %%rdi, 112(%2) ;\n"
+		:
+		: "m"(u), "r"(&table[pos * 8]), "r"(t), "m"(sign) /* %0 = u, %1 = table, %2 = t, %3 = sign */
+		:
+			"%rax", "%rcx", "%rdx", "%rdi", "%rsi", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13",
+			"%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm14", "%xmm15", /* xmm15 is a scratch mask above; was wrongly listed as xmm14 twice */
+			"cc", "memory"
+	);
+}
+
+#endif /* defined(ED25519_GCC_64BIT_X86_CHOOSE) */
+
diff --git a/src/ext/ed25519/donna/ed25519-donna-basepoint-table.h b/src/ext/ed25519/donna/ed25519-donna-basepoint-table.h
new file mode 100644
index 0000000000..41dcd526a2
--- /dev/null
+++ b/src/ext/ed25519/donna/ed25519-donna-basepoint-table.h
@@ -0,0 +1,259 @@
+/* multiples of the base point in packed {ysubx, xaddy, t2d} form */
+static const uint8_t ALIGN(16) ge25519_niels_base_multiples[256][96] = {
+ {0x3e,0x91,0x40,0xd7,0x05,0x39,0x10,0x9d,0xb3,0xbe,0x40,0xd1,0x05,0x9f,0x39,0xfd,0x09,0x8a,0x8f,0x68,0x34,0x84,0xc1,0xa5,0x67,0x12,0xf8,0x98,0x92,0x2f,0xfd,0x44,0x85,0x3b,0x8c,0xf5,0xc6,0x93,0xbc,0x2f,0x19,0x0e,0x8c,0xfb,0xc6,0x2d,0x93,0xcf,0xc2,0x42,0x3d,0x64,0x98,0x48,0x0b,0x27,0x65,0xba,0xd4,0x33,0x3a,0x9d,0xcf,0x07,0x59,0xbb,0x6f,0x4b,0x67,0x15,0xbd,0xdb,0xea,0xa5,0xa2,0xee,0x00,0x3f,0xe1,0x41,0xfa,0xc6,0x57,0xc9,0x1c,0x9d,0xd4,0xcd,0xca,0xec,0x16,0xaf,0x1f,0xbe,0x0e,0x4f},
+ {0xa8,0xd5,0xb4,0x42,0x60,0xa5,0x99,0x8a,0xf6,0xac,0x60,0x4e,0x0c,0x81,0x2b,0x8f,0xaa,0x37,0x6e,0xb1,0x6b,0x23,0x9e,0xe0,0x55,0x25,0xc9,0x69,0xa6,0x95,0xb5,0x6b,0xd7,0x71,0x3c,0x93,0xfc,0xe7,0x24,0x92,0xb5,0xf5,0x0f,0x7a,0x96,0x9d,0x46,0x9f,0x02,0x07,0xd6,0xe1,0x65,0x9a,0xa6,0x5a,0x2e,0x2e,0x7d,0xa8,0x3f,0x06,0x0c,0x59,0x02,0x68,0xd3,0xda,0xaa,0x7e,0x34,0x6e,0x05,0x48,0xee,0x83,0x93,0x59,0xf3,0xba,0x26,0x68,0x07,0xe6,0x10,0xbe,0xca,0x3b,0xb8,0xd1,0x5e,0x16,0x0a,0x4f,0x31,0x49},
+ {0x65,0xd2,0xfc,0xa4,0xe8,0x1f,0x61,0x56,0x7d,0xba,0xc1,0xe5,0xfd,0x53,0xd3,0x3b,0xbd,0xd6,0x4b,0x21,0x1a,0xf3,0x31,0x81,0x62,0xda,0x5b,0x55,0x87,0x15,0xb9,0x2a,0x30,0x97,0xee,0x4c,0xa8,0xb0,0x25,0xaf,0x8a,0x4b,0x86,0xe8,0x30,0x84,0x5a,0x02,0x32,0x67,0x01,0x9f,0x02,0x50,0x1b,0xc1,0xf4,0xf8,0x80,0x9a,0x1b,0x4e,0x16,0x7a,0x34,0x48,0x67,0xf1,0xf4,0x11,0xf2,0x9b,0x95,0xf8,0x2d,0xf6,0x17,0x6b,0x4e,0xb8,0x4e,0x2a,0x72,0x5b,0x07,0x6f,0xde,0xd7,0x21,0x2a,0xbb,0x63,0xb9,0x04,0x9a,0x54},
+ {0xbf,0x18,0x68,0x05,0x0a,0x05,0xfe,0x95,0xa9,0xfa,0x60,0x56,0x71,0x89,0x7e,0x32,0x73,0x50,0xa0,0x06,0xcd,0xe3,0xe8,0xc3,0x9a,0xa4,0x45,0x74,0x4c,0x3f,0x93,0x27,0x9f,0x09,0xfc,0x8e,0xb9,0x51,0x73,0x28,0x38,0x25,0xfd,0x7d,0xf4,0xc6,0x65,0x67,0x65,0x92,0x0a,0xfb,0x3d,0x8d,0x34,0xca,0x27,0x87,0xe5,0x21,0x03,0x91,0x0e,0x68,0xb0,0x26,0x14,0xe5,0xec,0x45,0x1e,0xbf,0x94,0x0f,0xba,0x6d,0x3d,0xc6,0x2b,0xe3,0xc0,0x52,0xf8,0x8c,0xd5,0x74,0x29,0xe4,0x18,0x4c,0xe6,0xb0,0xb1,0x79,0xf0,0x44},
+ {0xba,0xd6,0x47,0xa4,0xc3,0x82,0x91,0x7f,0xb7,0x29,0x27,0x4b,0xd1,0x14,0x00,0xd5,0x87,0xa0,0x64,0xb8,0x1c,0xf1,0x3c,0xe3,0xf3,0x55,0x1b,0xeb,0x73,0x7e,0x4a,0x15,0x33,0xbb,0xa5,0x08,0x44,0xbc,0x12,0xa2,0x02,0xed,0x5e,0xc7,0xc3,0x48,0x50,0x8d,0x44,0xec,0xbf,0x5a,0x0c,0xeb,0x1b,0xdd,0xeb,0x06,0xe2,0x46,0xf1,0xcc,0x45,0x29,0xb3,0x03,0xd0,0xe7,0x79,0xa1,0x32,0xc8,0x7e,0x4d,0x12,0x00,0x0a,0x9d,0x72,0x5f,0xf3,0x8f,0x6d,0x0e,0xa1,0xd4,0xc1,0x62,0x98,0x7a,0xb2,0x38,0x59,0xac,0xb8,0x68},
+ {0xa4,0x8c,0x7d,0x7b,0xb6,0x06,0x98,0x49,0x39,0x27,0xd2,0x27,0x84,0xe2,0x5b,0x57,0xb9,0x53,0x45,0x20,0xe7,0x5c,0x08,0xbb,0x84,0x78,0x41,0xae,0x41,0x4c,0xb6,0x38,0x31,0x71,0x15,0x77,0xeb,0xee,0x0c,0x3a,0x88,0xaf,0xc8,0x00,0x89,0x15,0x27,0x9b,0x36,0xa7,0x59,0xda,0x68,0xb6,0x65,0x80,0xbd,0x38,0xcc,0xa2,0xb6,0x7b,0xe5,0x51,0xa4,0xe3,0x9d,0x68,0x91,0xad,0x9d,0x8f,0x37,0x91,0xfb,0xf8,0x28,0x24,0x5f,0x17,0x88,0xb9,0xcf,0x9f,0x32,0xb5,0x0a,0x05,0x9f,0xc0,0x54,0x13,0xa2,0xdf,0x65,0x78},
+ {0xb1,0x21,0x32,0xaa,0x9a,0x2c,0x6f,0xba,0xa7,0x23,0xba,0x3b,0x53,0x21,0xa0,0x6c,0x3a,0x2c,0x19,0x92,0x4f,0x76,0xea,0x9d,0xe0,0x17,0x53,0x2e,0x5d,0xdd,0x6e,0x1d,0xbf,0xa3,0x4e,0x94,0xd0,0x5c,0x1a,0x6b,0xd2,0xc0,0x9d,0xb3,0x3a,0x35,0x70,0x74,0x49,0x2e,0x54,0x28,0x82,0x52,0xb2,0x71,0x7e,0x92,0x3c,0x28,0x69,0xea,0x1b,0x46,0x36,0xda,0x0f,0xab,0xac,0x8a,0x7a,0x21,0xc8,0x49,0x35,0x3d,0x54,0xc6,0x28,0xa5,0x68,0x75,0xab,0x13,0x8b,0x5b,0xd0,0x37,0x37,0xbc,0x2c,0x3a,0x62,0xef,0x3c,0x23},
+ {0xd9,0x34,0x92,0xf3,0xed,0x5d,0xa7,0xe2,0xf9,0x58,0xb5,0xe1,0x80,0x76,0x3d,0x96,0xfb,0x23,0x3c,0x6e,0xac,0x41,0x27,0x2c,0xc3,0x01,0x0e,0x32,0xa1,0x24,0x90,0x3a,0x8f,0x3e,0xdd,0x04,0x66,0x59,0xb7,0x59,0x2c,0x70,0x88,0xe2,0x77,0x03,0xb3,0x6c,0x23,0xc3,0xd9,0x5e,0x66,0x9c,0x33,0xb1,0x2f,0xe5,0xbc,0x61,0x60,0xe7,0x15,0x09,0x7e,0xa3,0x34,0xa8,0x35,0xe8,0x7d,0xdf,0xea,0x57,0x98,0x68,0xda,0x9c,0xe1,0x8b,0x26,0xb3,0x67,0x71,0x36,0x85,0x11,0x2c,0xc2,0xd5,0xef,0xdb,0xd9,0xb3,0x9e,0x58},
+ {0x5e,0x51,0xaa,0x49,0x54,0x63,0x5b,0xed,0x3a,0x82,0xc6,0x0b,0x9f,0xc4,0x65,0xa8,0xc4,0xd1,0x42,0x5b,0xe9,0x1f,0x0c,0x85,0xb9,0x15,0xd3,0x03,0x6f,0x6d,0xd7,0x30,0x1d,0x9c,0x2f,0x63,0x0e,0xdd,0xcc,0x2e,0x15,0x31,0x89,0x76,0x96,0xb6,0xd0,0x51,0x58,0x7a,0x63,0xa8,0x6b,0xb7,0xdf,0x52,0x39,0xef,0x0e,0xa0,0x49,0x7d,0xd3,0x6d,0xc7,0xe4,0x06,0x21,0x17,0x44,0x44,0x6c,0x69,0x7f,0x8d,0x92,0x80,0xd6,0x53,0xfb,0x26,0x3f,0x4d,0x69,0xa4,0x9e,0x73,0xb4,0xb0,0x4b,0x86,0x2e,0x11,0x97,0xc6,0x10},
+ {0xde,0x5f,0xbe,0x7d,0x27,0xc4,0x93,0x64,0xa2,0x7e,0xad,0x19,0xad,0x4f,0x5d,0x26,0x90,0x45,0x30,0x46,0xc8,0xdf,0x00,0x0e,0x09,0xfe,0x66,0xed,0xab,0x1c,0xe6,0x25,0x05,0xc8,0x58,0x83,0xa0,0x2a,0xa6,0x0c,0x47,0x42,0x20,0x7a,0xe3,0x4a,0x3d,0x6a,0xdc,0xed,0x11,0x3b,0xa6,0xd3,0x64,0x74,0xef,0x06,0x08,0x55,0xaf,0x9b,0xbf,0x03,0x04,0x66,0x58,0xcc,0x28,0xe1,0x13,0x3f,0x7e,0x74,0x59,0xb4,0xec,0x73,0x58,0x6f,0xf5,0x68,0x12,0xcc,0xed,0x3d,0xb6,0xa0,0x2c,0xe2,0x86,0x45,0x63,0x78,0x6d,0x56},
+ {0x34,0x08,0xc1,0x9c,0x9f,0xa4,0x37,0x16,0x51,0xc4,0x9b,0xa8,0xd5,0x56,0x8e,0xbc,0xdb,0xd2,0x7f,0x7f,0x0f,0xec,0xb5,0x1c,0xd9,0x35,0xcc,0x5e,0xca,0x5b,0x97,0x33,0xd0,0x2f,0x5a,0xc6,0x85,0x42,0x05,0xa1,0xc3,0x67,0x16,0xf3,0x2a,0x11,0x64,0x6c,0x58,0xee,0x1a,0x73,0x40,0xe2,0x0a,0x68,0x2a,0xb2,0x93,0x47,0xf3,0xa5,0xfb,0x14,0xd4,0xf7,0x85,0x69,0x16,0x46,0xd7,0x3c,0x57,0x00,0xc8,0xc9,0x84,0x5e,0x3e,0x59,0x1e,0x13,0x61,0x7b,0xb6,0xf2,0xc3,0x2f,0x6c,0x52,0xfc,0x83,0xea,0x9c,0x82,0x14},
+ {0xc2,0x95,0xdd,0x97,0x84,0x7b,0x43,0xff,0xa7,0xb5,0x4e,0xaa,0x30,0x4e,0x74,0x6c,0x8b,0xe8,0x85,0x3c,0x61,0x5d,0x0c,0x9e,0x73,0x81,0x75,0x5f,0x1e,0xc7,0xd9,0x2f,0xb8,0xec,0x71,0x4e,0x2f,0x0b,0xe7,0x21,0xe3,0x77,0xa4,0x40,0xb9,0xdd,0x56,0xe6,0x80,0x4f,0x1d,0xce,0xce,0x56,0x65,0xbf,0x7e,0x7b,0x5d,0x53,0xc4,0x3b,0xfc,0x05,0xdd,0xde,0xaf,0x52,0xae,0xb3,0xb8,0x24,0xcf,0x30,0x3b,0xed,0x8c,0x63,0x95,0x34,0x95,0x81,0xbe,0xa9,0x83,0xbc,0xa4,0x33,0x04,0x1f,0x65,0x5c,0x47,0x67,0x37,0x37},
+ {0xd9,0xad,0xd1,0x40,0xfd,0x99,0xba,0x2f,0x27,0xd0,0xf4,0x96,0x6f,0x16,0x07,0xb3,0xae,0x3b,0xf0,0x15,0x52,0xf0,0x63,0x43,0x99,0xf9,0x18,0x3b,0x6c,0xa5,0xbe,0x1f,0x90,0x65,0x24,0x14,0xcb,0x95,0x40,0x63,0x35,0x55,0xc1,0x16,0x40,0x14,0x12,0xef,0x60,0xbc,0x10,0x89,0x0c,0x14,0x38,0x9e,0x8c,0x7c,0x90,0x30,0x57,0x90,0xf5,0x6b,0x8a,0x5b,0x41,0xe1,0xf1,0x78,0xa7,0x0f,0x7e,0xa7,0xc3,0xba,0xf7,0x9f,0x40,0x06,0x50,0x9a,0xa2,0x9a,0xb8,0xd7,0x52,0x6f,0x56,0x5a,0x63,0x7a,0xf6,0x1c,0x52,0x02},
+ {0x94,0x52,0x9d,0x0a,0x0b,0xee,0x3f,0x51,0x66,0x5a,0xdf,0x0f,0x5c,0xe7,0x98,0x8f,0xce,0x07,0xe1,0xbf,0x88,0x86,0x61,0xd4,0xed,0x2c,0x38,0x71,0x7e,0x0a,0xa0,0x3f,0xe4,0x5e,0x2f,0x77,0x20,0x67,0x14,0xb1,0xce,0x9a,0x07,0x96,0xb1,0x94,0xf8,0xe8,0x4a,0x82,0xac,0x00,0x4d,0x22,0xf8,0x4a,0xc4,0x6c,0xcd,0xf7,0xd9,0x53,0x17,0x00,0x34,0xdb,0x3d,0x96,0x2d,0x23,0x69,0x3c,0x58,0x38,0x97,0xb4,0xda,0x87,0xde,0x1d,0x85,0xf2,0x91,0xa0,0xf9,0xd1,0xd7,0xaa,0xb6,0xed,0x48,0xa0,0x2f,0xfe,0xb5,0x12},
+ {0x4d,0xe3,0xfc,0x96,0xc4,0xfb,0xf0,0x71,0xed,0x5b,0xf3,0xad,0x6b,0x82,0xb9,0x73,0x61,0xc5,0x28,0xff,0x61,0x72,0x04,0xd2,0x6f,0x20,0xb1,0x6f,0xf9,0x76,0x9b,0x74,0x92,0x1e,0x6f,0xad,0x26,0x7c,0x2b,0xdf,0x13,0x89,0x4b,0x50,0x23,0xd3,0x66,0x4b,0xc3,0x8b,0x1c,0x75,0xc0,0x9d,0x40,0x8c,0xb8,0xc7,0x96,0x07,0xc2,0x93,0x7e,0x6f,0x05,0xae,0xa6,0xae,0x04,0xf6,0x5a,0x1f,0x99,0x9c,0xe4,0xbe,0xf1,0x51,0x23,0xc1,0x66,0x6b,0xff,0xee,0xb5,0x08,0xa8,0x61,0x51,0x21,0xe0,0x01,0x0f,0xc1,0xce,0x0f},
+ {0x44,0x1e,0xfe,0x49,0xa6,0x58,0x4d,0x64,0x7e,0x77,0xad,0x31,0xa2,0xae,0xfc,0x21,0xd2,0xd0,0x7f,0x88,0x5a,0x1c,0x44,0x02,0xf3,0x11,0xc5,0x83,0x71,0xaa,0x01,0x49,0x45,0x4e,0x24,0xc4,0x9d,0xd2,0xf2,0x3d,0x0a,0xde,0xd8,0x93,0x74,0x0e,0x02,0x2b,0x4d,0x21,0x0c,0x82,0x7e,0x06,0xc8,0x6c,0x0a,0xb9,0xea,0x6f,0x16,0x79,0x37,0x41,0xf0,0xf8,0x1a,0x8c,0x54,0xb7,0xb1,0x08,0xb4,0x99,0x62,0x24,0x7c,0x7a,0x0f,0xce,0x39,0xd9,0x06,0x1e,0xf9,0xb0,0x60,0xf7,0x13,0x12,0x6d,0x72,0x7b,0x88,0xbb,0x41},
+ {0xbe,0x46,0x43,0x74,0x44,0x7d,0xe8,0x40,0x25,0x2b,0xb5,0x15,0xd4,0xda,0x48,0x1d,0x3e,0x60,0x3b,0xa1,0x18,0x8a,0x3a,0x7c,0xf7,0xbd,0xcd,0x2f,0xc1,0x28,0xb7,0x4e,0xae,0x91,0x66,0x7c,0x59,0x4c,0x23,0x7e,0xc8,0xb4,0x85,0x0a,0x3d,0x9d,0x88,0x64,0xe7,0xfa,0x4a,0x35,0x0c,0xc9,0xe2,0xda,0x1d,0x9e,0x6a,0x0c,0x07,0x1e,0x87,0x0a,0x89,0x89,0xbc,0x4b,0x99,0xb5,0x01,0x33,0x60,0x42,0xdd,0x5b,0x3a,0xae,0x6b,0x73,0x3c,0x9e,0xd5,0x19,0xe2,0xad,0x61,0x0d,0x64,0xd4,0x85,0x26,0x0f,0x30,0xe7,0x3e},
+ {0xb7,0xd6,0x7d,0x9e,0xe4,0x55,0xd2,0xf5,0xac,0x1e,0x0b,0x61,0x5c,0x11,0x16,0x80,0xca,0x87,0xe1,0x92,0x5d,0x97,0x99,0x3c,0xc2,0x25,0x91,0x97,0x62,0x57,0x81,0x13,0x18,0x75,0x1e,0x84,0x47,0x79,0xfa,0x43,0xd7,0x46,0x9c,0x63,0x59,0xfa,0xc6,0xe5,0x74,0x2b,0x05,0xe3,0x1d,0x5e,0x06,0xa1,0x30,0x90,0xb8,0xcf,0xa2,0xc6,0x47,0x7d,0xe0,0xd6,0xf0,0x8e,0x14,0xd0,0xda,0x3f,0x3c,0x6f,0x54,0x91,0x9a,0x74,0x3e,0x9d,0x57,0x81,0xbb,0x26,0x10,0x62,0xec,0x71,0x80,0xec,0xc9,0x34,0x8d,0xf5,0x8c,0x14},
+ {0x27,0xf0,0x34,0x79,0xf6,0x92,0xa4,0x46,0xa9,0x0a,0x84,0xf6,0xbe,0x84,0x99,0x46,0x54,0x18,0x61,0x89,0x2a,0xbc,0xa1,0x5c,0xd4,0xbb,0x5d,0xbd,0x1e,0xfa,0xf2,0x3f,0x6d,0x75,0xe4,0x9a,0x7d,0x2f,0x57,0xe2,0x7f,0x48,0xf3,0x88,0xbb,0x45,0xc3,0x56,0x8d,0xa8,0x60,0x69,0x6d,0x0b,0xd1,0x9f,0xb9,0xa1,0xae,0x4e,0xad,0xeb,0x8f,0x27,0x66,0x39,0x93,0x8c,0x1f,0x68,0xaa,0xb1,0x98,0x0c,0x29,0x20,0x9c,0x94,0x21,0x8c,0x52,0x3c,0x9d,0x21,0x91,0x52,0x11,0x39,0x7b,0x67,0x9c,0xfe,0x02,0xdd,0x04,0x41},
+ {0x2a,0x42,0x24,0x11,0x5e,0xbf,0xb2,0x72,0xb5,0x3a,0xa3,0x98,0x33,0x0c,0xfa,0xa1,0x66,0xb6,0x52,0xfa,0x01,0x61,0xcb,0x94,0xd5,0x53,0xaf,0xaf,0x00,0x3b,0x86,0x2c,0xb8,0x6a,0x09,0xdb,0x06,0x4e,0x21,0x81,0x35,0x4f,0xe4,0x0c,0xc9,0xb6,0xa8,0x21,0xf5,0x2a,0x9e,0x40,0x2a,0xc1,0x24,0x65,0x81,0xa4,0xfc,0x8e,0xa4,0xb5,0x65,0x01,0x76,0x6a,0x84,0xa0,0x74,0xa4,0x90,0xf1,0xc0,0x7c,0x2f,0xcd,0x84,0xf9,0xef,0x12,0x8f,0x2b,0xaa,0x58,0x06,0x29,0x5e,0x69,0xb8,0xc8,0xfe,0xbf,0xd9,0x67,0x1b,0x59},
+ {0xfa,0x9b,0xb4,0x80,0x1c,0x0d,0x2f,0x31,0x8a,0xec,0xf3,0xab,0x5e,0x51,0x79,0x59,0x88,0x1c,0xf0,0x9e,0xc0,0x33,0x70,0x72,0xcb,0x7b,0x8f,0xca,0xc7,0x2e,0xe0,0x3d,0x5d,0xb5,0x18,0x9f,0x71,0xb3,0xb9,0x99,0x1e,0x64,0x8c,0xa1,0xfa,0xe5,0x65,0xe4,0xed,0x05,0x9f,0xc2,0x36,0x11,0x08,0x61,0x8b,0x12,0x30,0x70,0x86,0x4f,0x9b,0x48,0xef,0x92,0xeb,0x3a,0x2d,0x10,0x32,0xd2,0x61,0xa8,0x16,0x61,0xb4,0x53,0x62,0xe1,0x24,0xaa,0x0b,0x19,0xe7,0xab,0x7e,0x3d,0xbf,0xbe,0x6c,0x49,0xba,0xfb,0xf5,0x49},
+ {0xd4,0xcf,0x5b,0x8a,0x10,0x9a,0x94,0x30,0xeb,0x73,0x64,0xbc,0x70,0xdd,0x40,0xdc,0x1c,0x0d,0x7c,0x30,0xc1,0x94,0xc2,0x92,0x74,0x6e,0xfa,0xcb,0x6d,0xa8,0x04,0x56,0x2e,0x57,0x9c,0x1e,0x8c,0x62,0x5d,0x15,0x41,0x47,0x88,0xc5,0xac,0x86,0x4d,0x8a,0xeb,0x63,0x57,0x51,0xf6,0x52,0xa3,0x91,0x5b,0x51,0x67,0x88,0xc2,0xa6,0xa1,0x06,0xb6,0x64,0x17,0x7c,0xd4,0xd1,0x88,0x72,0x51,0x8b,0x41,0xe0,0x40,0x11,0x54,0x72,0xd1,0xf6,0xac,0x18,0x60,0x1a,0x03,0x9f,0xc6,0x42,0x27,0xfe,0x89,0x9e,0x98,0x20},
+ {0x7f,0xcc,0x2d,0x3a,0xfd,0x77,0x97,0x49,0x92,0xd8,0x4f,0xa5,0x2c,0x7c,0x85,0x32,0xa0,0xe3,0x07,0xd2,0x64,0xd8,0x79,0xa2,0x29,0x7e,0xa6,0x0c,0x1d,0xed,0x03,0x04,0x2e,0xec,0xea,0x85,0x8b,0x27,0x74,0x16,0xdf,0x2b,0xcb,0x7a,0x07,0xdc,0x21,0x56,0x5a,0xf4,0xcb,0x61,0x16,0x4c,0x0a,0x64,0xd3,0x95,0x05,0xf7,0x50,0x99,0x0b,0x73,0x52,0xc5,0x4e,0x87,0x35,0x2d,0x4b,0xc9,0x8d,0x6f,0x24,0x98,0xcf,0xc8,0xe6,0xc5,0xce,0x35,0xc0,0x16,0xfa,0x46,0xcb,0xf7,0xcc,0x3d,0x30,0x08,0x43,0x45,0xd7,0x5b},
+ {0xc2,0x4c,0xb2,0x28,0x95,0xd1,0x9a,0x7f,0x81,0xc1,0x35,0x63,0x65,0x54,0x6b,0x7f,0x36,0x72,0xc0,0x4f,0x6e,0xb6,0xb8,0x66,0x83,0xad,0x80,0x73,0x00,0x78,0x3a,0x13,0x2a,0x79,0xe7,0x15,0x21,0x93,0xc4,0x85,0xc9,0xdd,0xcd,0xbd,0xa2,0x89,0x4c,0xc6,0x62,0xd7,0xa3,0xad,0xa8,0x3d,0x1e,0x9d,0x2c,0xf8,0x67,0x30,0x12,0xdb,0xb7,0x5b,0xbe,0x62,0xca,0xc6,0x67,0xf4,0x61,0x09,0xee,0x52,0x19,0x21,0xd6,0x21,0xec,0x04,0x70,0x47,0xd5,0x9b,0x77,0x60,0x23,0x18,0xd2,0xe0,0xf0,0x58,0x6d,0xca,0x0d,0x74},
+ {0x4e,0xce,0xcf,0x52,0x07,0xee,0x48,0xdf,0xb7,0x08,0xec,0x06,0xf3,0xfa,0xff,0xc3,0xc4,0x59,0x54,0xb9,0x2a,0x0b,0x71,0x05,0x8d,0xa3,0x3e,0x96,0xfa,0x25,0x1d,0x16,0x3c,0x43,0x78,0x04,0x57,0x8c,0x1a,0x23,0x9d,0x43,0x81,0xc2,0x0e,0x27,0xb5,0xb7,0x9f,0x07,0xd9,0xe3,0xea,0x99,0xaa,0xdb,0xd9,0x03,0x2b,0x6c,0x25,0xf5,0x03,0x2c,0x7d,0xa4,0x53,0x7b,0x75,0x18,0x0f,0x79,0x79,0x58,0x0c,0xcf,0x30,0x01,0x7b,0x30,0xf9,0xf7,0x7e,0x25,0x77,0x3d,0x90,0x31,0xaf,0xbb,0x96,0xbd,0xbd,0x68,0x94,0x69},
+ {0xcf,0xfe,0xda,0xf4,0x46,0x2f,0x1f,0xbd,0xf7,0xd6,0x7f,0xa4,0x14,0x01,0xef,0x7c,0x7f,0xb3,0x47,0x4a,0xda,0xfd,0x1f,0xd3,0x85,0x57,0x90,0x73,0xa4,0x19,0x52,0x52,0x48,0x19,0xa9,0x6a,0xe6,0x3d,0xdd,0xd8,0xcc,0xd2,0xc0,0x2f,0xc2,0x64,0x50,0x48,0x2f,0xea,0xfd,0x34,0x66,0x24,0x48,0x9b,0x3a,0x2e,0x4a,0x6c,0x4e,0x1c,0x3e,0x29,0xe1,0x12,0x51,0x92,0x4b,0x13,0x6e,0x37,0xa0,0x5d,0xa1,0xdc,0xb5,0x78,0x37,0x70,0x11,0x31,0x1c,0x46,0xaf,0x89,0x45,0xb0,0x23,0x28,0x03,0x7f,0x44,0x5c,0x60,0x5b},
+ {0x89,0x7c,0xc4,0x20,0x59,0x80,0x65,0xb9,0xcc,0x8f,0x3b,0x92,0x0c,0x10,0xf0,0xe7,0x77,0xef,0xe2,0x02,0x65,0x25,0x01,0x00,0xee,0xb3,0xae,0xa8,0xce,0x6d,0xa7,0x24,0x4c,0xf0,0xe7,0xf0,0xc6,0xfe,0xe9,0x3b,0x62,0x49,0xe3,0x75,0x9e,0x57,0x6a,0x86,0x1a,0xe6,0x1d,0x1e,0x16,0xef,0x42,0x55,0xd5,0xbd,0x5a,0xcc,0xf4,0xfe,0x12,0x2f,0x40,0xc7,0xc0,0xdf,0xb2,0x22,0x45,0x0a,0x07,0xa4,0xc9,0x40,0x7f,0x6e,0xd0,0x10,0x68,0xf6,0xcf,0x78,0x41,0x14,0xcf,0xc6,0x90,0x37,0xa4,0x18,0x25,0x7b,0x60,0x5e},
+ {0x18,0x18,0xdf,0x6c,0x8f,0x1d,0xb3,0x58,0xa2,0x58,0x62,0xc3,0x4f,0xa7,0xcf,0x35,0x6e,0x1d,0xe6,0x66,0x4f,0xff,0xb3,0xe1,0xf7,0xd5,0xcd,0x6c,0xab,0xac,0x67,0x50,0x14,0xcf,0x96,0xa5,0x1c,0x43,0x2c,0xa0,0x00,0xe4,0xd3,0xae,0x40,0x2d,0xc4,0xe3,0xdb,0x26,0x0f,0x2e,0x80,0x26,0x45,0xd2,0x68,0x70,0x45,0x9e,0x13,0x33,0x1f,0x20,0x51,0x9d,0x03,0x08,0x6b,0x7f,0x52,0xfd,0x06,0x00,0x7c,0x01,0x64,0x49,0xb1,0x18,0xa8,0xa4,0x25,0x2e,0xb0,0x0e,0x22,0xd5,0x75,0x03,0x46,0x62,0x88,0xba,0x7c,0x39},
+ {0xb2,0x59,0x59,0xf0,0x93,0x30,0xc1,0x30,0x76,0x79,0xa9,0xe9,0x8d,0xa1,0x3a,0xe2,0x26,0x5e,0x1d,0x72,0x91,0xd4,0x2f,0x22,0x3a,0x6c,0x6e,0x76,0x20,0xd3,0x39,0x23,0xe7,0x79,0x13,0xc8,0xfb,0xc3,0x15,0x78,0xf1,0x2a,0xe1,0xdd,0x20,0x94,0x61,0xa6,0xd5,0xfd,0xa8,0x85,0xf8,0xc0,0xa9,0xff,0x52,0xc2,0xe1,0xc1,0x22,0x40,0x1b,0x77,0xa7,0x2f,0x3a,0x51,0x86,0xd9,0x7d,0xd8,0x08,0xcf,0xd4,0xf9,0x71,0x9b,0xac,0xf5,0xb3,0x83,0xa2,0x1e,0x1b,0xc3,0x6b,0xd0,0x76,0x1a,0x97,0x19,0x92,0x18,0x1a,0x33},
+ {0xc6,0x80,0x4f,0xfb,0x45,0x6f,0x16,0xf5,0xcf,0x75,0xc7,0x61,0xde,0xc7,0x36,0x9c,0x1c,0xd9,0x41,0x90,0x1b,0xe8,0xd4,0xe3,0x21,0xfe,0xbd,0x83,0x6b,0x7c,0x16,0x31,0xaf,0x72,0x75,0x9d,0x3a,0x2f,0x51,0x26,0x9e,0x4a,0x07,0x68,0x88,0xe2,0xcb,0x5b,0xc4,0xf7,0x80,0x11,0xc1,0xc1,0xed,0x84,0x7b,0xa6,0x49,0xf6,0x9f,0x61,0xc9,0x1a,0x68,0x10,0x4b,0x52,0x42,0x38,0x2b,0xf2,0x87,0xe9,0x9c,0xee,0x3b,0x34,0x68,0x50,0xc8,0x50,0x62,0x4a,0x84,0x71,0x9d,0xfc,0x11,0xb1,0x08,0x1f,0x34,0x36,0x24,0x61},
+ {0x8d,0x89,0x4e,0x87,0xdb,0x41,0x9d,0xd9,0x20,0xdc,0x07,0x6c,0xf1,0xa5,0xfe,0x09,0xbc,0x9b,0x0f,0xd0,0x67,0x2c,0x3d,0x79,0x40,0xff,0x5e,0x9e,0x30,0xe2,0xeb,0x46,0x38,0x26,0x2d,0x1a,0xe3,0x49,0x63,0x8b,0x35,0xfd,0xd3,0x9b,0x00,0xb7,0xdf,0x9d,0xa4,0x6b,0xa0,0xa3,0xb8,0xf1,0x8b,0x7f,0x45,0x04,0xd9,0x78,0x31,0xaa,0x22,0x15,0x38,0x49,0x61,0x69,0x53,0x2f,0x38,0x2c,0x10,0x6d,0x2d,0xb7,0x9a,0x40,0xfe,0xda,0x27,0xf2,0x46,0xb6,0x91,0x33,0xc8,0xe8,0x6c,0x30,0x24,0x05,0xf5,0x70,0xfe,0x45},
+ {0x8c,0x0b,0x0c,0x96,0xa6,0x75,0x48,0xda,0x20,0x2f,0x0e,0xef,0x76,0xd0,0x68,0x5b,0xd4,0x8f,0x0b,0x3d,0xcf,0x51,0xfb,0x07,0xd4,0x92,0xe3,0xa0,0x23,0x16,0x8d,0x42,0x91,0x14,0x95,0xc8,0x20,0x49,0xf2,0x62,0xa2,0x0c,0x63,0x3f,0xc8,0x07,0xf0,0x05,0xb8,0xd4,0xc9,0xf5,0xd2,0x45,0xbb,0x6f,0x45,0x22,0x7a,0xb5,0x6d,0x9f,0x61,0x16,0xfd,0x08,0xa3,0x01,0x44,0x4a,0x4f,0x08,0xac,0xca,0xa5,0x76,0xc3,0x19,0x22,0xa8,0x7d,0xbc,0xd1,0x43,0x46,0xde,0xb8,0xde,0xc6,0x38,0xbd,0x60,0x2d,0x59,0x81,0x1d},
+ {0x5f,0xac,0x0d,0xa6,0x56,0x87,0x36,0x61,0x57,0xdc,0xab,0xeb,0x6a,0x2f,0xe0,0x17,0x7d,0x0f,0xce,0x4c,0x2d,0x3f,0x19,0x7f,0xf0,0xdc,0xec,0x89,0x77,0x4a,0x23,0x20,0xe8,0xc5,0x85,0x7b,0x9f,0xb6,0x65,0x87,0xb2,0xba,0x68,0xd1,0x8b,0x67,0xf0,0x6f,0x9b,0x0f,0x33,0x1d,0x7c,0xe7,0x70,0x3a,0x7c,0x8e,0xaf,0xb0,0x51,0x6d,0x5f,0x3a,0x52,0xb2,0x78,0x71,0xb6,0x0d,0xd2,0x76,0x60,0xd1,0x1e,0xd5,0xf9,0x34,0x1c,0x07,0x70,0x11,0xe4,0xb3,0x20,0x4a,0x2a,0xf6,0x66,0xe3,0xff,0x3c,0x35,0x82,0xd6,0x7c},
+ {0xb6,0xfa,0x87,0xd8,0x5b,0xa4,0xe1,0x0b,0x6e,0x3b,0x40,0xba,0x32,0x6a,0x84,0x2a,0x00,0x60,0x6e,0xe9,0x12,0x10,0x92,0xd9,0x43,0x09,0xdc,0x3b,0x86,0xc8,0x38,0x28,0xf3,0xf4,0xac,0x68,0x60,0xcd,0x65,0xa6,0xd3,0xe3,0xd7,0x3c,0x18,0x2d,0xd9,0x42,0xd9,0x25,0x60,0x33,0x9d,0x38,0x59,0x57,0xff,0xd8,0x2c,0x2b,0x3b,0x25,0xf0,0x3e,0x30,0x50,0x46,0x4a,0xcf,0xb0,0x6b,0xd1,0xab,0x77,0xc5,0x15,0x41,0x6b,0x49,0xfa,0x9d,0x41,0xab,0xf4,0x8a,0xae,0xcf,0x82,0x12,0x28,0xa8,0x06,0xa6,0xb8,0xdc,0x21},
+ {0xc8,0x9f,0x9d,0x8c,0x46,0x04,0x60,0x5c,0xcb,0xa3,0x2a,0xd4,0x6e,0x09,0x40,0x25,0x9c,0x2f,0xee,0x12,0x4c,0x4d,0x5b,0x12,0xab,0x1d,0xa3,0x94,0x81,0xd0,0xc3,0x0b,0xba,0x31,0x77,0xbe,0xfa,0x00,0x8d,0x9a,0x89,0x18,0x9e,0x62,0x7e,0x60,0x03,0x82,0x7f,0xd9,0xf3,0x43,0x37,0x02,0xcc,0xb2,0x8b,0x67,0x6f,0x6c,0xbf,0x0d,0x84,0x5d,0x8b,0xe1,0x9f,0x30,0x0d,0x38,0x6e,0x70,0xc7,0x65,0xe1,0xb9,0xa6,0x2d,0xb0,0x6e,0xab,0x20,0xae,0x7d,0x99,0xba,0xbb,0x57,0xdd,0x96,0xc1,0x2a,0x23,0x76,0x42,0x3a},
+ {0xfa,0x84,0x70,0x8a,0x2c,0x43,0x42,0x4b,0x45,0xe5,0xb9,0xdf,0xe3,0x19,0x8a,0x89,0x5d,0xe4,0x58,0x9c,0x21,0x00,0x9f,0xbe,0xd1,0xeb,0x6d,0xa1,0xce,0x77,0xf1,0x1f,0xcb,0x7e,0x44,0xdb,0x72,0xc1,0xf8,0x3b,0xbd,0x2d,0x28,0xc6,0x1f,0xc4,0xcf,0x5f,0xfe,0x15,0xaa,0x75,0xc0,0xff,0xac,0x80,0xf9,0xa9,0xe1,0x24,0xe8,0xc9,0x70,0x07,0xfd,0xb5,0xb5,0x45,0x9a,0xd9,0x61,0xcf,0x24,0x79,0x3a,0x1b,0xe9,0x84,0x09,0x86,0x89,0x3e,0x3e,0x30,0x19,0x09,0x30,0xe7,0x1e,0x0b,0x50,0x41,0xfd,0x64,0xf2,0x39},
+ {0x9c,0xe2,0xe7,0xdb,0x17,0x34,0xad,0xa7,0x9c,0x13,0x9c,0x2b,0x6a,0x37,0x94,0xbd,0xa9,0x7b,0x59,0x93,0x8e,0x1b,0xe9,0xa0,0x40,0x98,0x88,0x68,0x34,0xd7,0x12,0x17,0xe1,0x7b,0x09,0xfe,0xab,0x4a,0x9b,0xd1,0x29,0x19,0xe0,0xdf,0xe1,0xfc,0x6d,0xa4,0xff,0xf1,0xa6,0x2c,0x94,0x08,0xc9,0xc3,0x4e,0xf1,0x35,0x2c,0x27,0x21,0xc6,0x65,0xdd,0x93,0x31,0xce,0xf8,0x89,0x2b,0xe7,0xbb,0xc0,0x25,0xa1,0x56,0x33,0x10,0x4d,0x83,0xfe,0x1c,0x2e,0x3d,0xa9,0x19,0x04,0x72,0xe2,0x9c,0xb1,0x0a,0x80,0xf9,0x22},
+ {0xcb,0xf8,0x9e,0x3e,0x8a,0x36,0x5a,0x60,0x15,0x47,0x50,0xa5,0x22,0xc0,0xe9,0xe3,0x8f,0x24,0x24,0x5f,0xb0,0x48,0x3d,0x55,0xe5,0x26,0x76,0x64,0xcd,0x16,0xf4,0x13,0xac,0xfd,0x6e,0x9a,0xdd,0x9f,0x02,0x42,0x41,0x49,0xa5,0x34,0xbe,0xce,0x12,0xb9,0x7b,0xf3,0xbd,0x87,0xb9,0x64,0x0f,0x64,0xb4,0xca,0x98,0x85,0xd3,0xa4,0x71,0x41,0x8c,0x4c,0xc9,0x99,0xaa,0x58,0x27,0xfa,0x07,0xb8,0x00,0xb0,0x6f,0x6f,0x00,0x23,0x92,0x53,0xda,0xad,0xdd,0x91,0xd2,0xfb,0xab,0xd1,0x4b,0x57,0xfa,0x14,0x82,0x50},
+ {0x4b,0xfe,0xd6,0x3e,0x15,0x69,0x02,0xc2,0xc4,0x77,0x1d,0x51,0x39,0x67,0x5a,0xa6,0x94,0xaf,0x14,0x2c,0x46,0x26,0xde,0xcb,0x4b,0xa7,0xab,0x6f,0xec,0x60,0xf9,0x22,0xd6,0x03,0xd0,0x53,0xbb,0x15,0x1a,0x46,0x65,0xc9,0xf3,0xbc,0x88,0x28,0x10,0xb2,0x5a,0x3a,0x68,0x6c,0x75,0x76,0xc5,0x27,0x47,0xb4,0x6c,0xc8,0xa4,0x58,0x77,0x3a,0x76,0x50,0xae,0x93,0xf6,0x11,0x81,0x54,0xa6,0x54,0xfd,0x1d,0xdf,0x21,0xae,0x1d,0x65,0x5e,0x11,0xf3,0x90,0x8c,0x24,0x12,0x94,0xf4,0xe7,0x8d,0x5f,0xd1,0x9f,0x5d},
+ {0x7f,0x72,0x63,0x6d,0xd3,0x08,0x14,0x03,0x33,0xb5,0xc7,0xd7,0xef,0x9a,0x37,0x6a,0x4b,0xe2,0xae,0xcc,0xc5,0x8f,0xe1,0xa9,0xd3,0xbe,0x8f,0x4f,0x91,0x35,0x2f,0x33,0x1e,0x52,0xd7,0xee,0x2a,0x4d,0x24,0x3f,0x15,0x96,0x2e,0x43,0x28,0x90,0x3a,0x8e,0xd4,0x16,0x9c,0x2e,0x77,0xba,0x64,0xe1,0xd8,0x98,0xeb,0x47,0xfa,0x87,0xc1,0x3b,0x0c,0xc2,0x86,0xea,0x15,0x01,0x47,0x6d,0x25,0xd1,0x46,0x6c,0xcb,0xb7,0x8a,0x99,0x88,0x01,0x66,0x3a,0xb5,0x32,0x78,0xd7,0x03,0xba,0x6f,0x90,0xce,0x81,0x0d,0x45},
+ {0x75,0x52,0x20,0xa6,0xa1,0xb6,0x7b,0x6e,0x83,0x8e,0x3c,0x41,0xd7,0x21,0x4f,0xaa,0xb2,0x5c,0x8f,0xe8,0x55,0xd1,0x56,0x6f,0xe1,0x5b,0x34,0xa6,0x4b,0x5d,0xe2,0x2d,0x3f,0x74,0xae,0x1c,0x96,0xd8,0x74,0xd0,0xed,0x63,0x1c,0xee,0xf5,0x18,0x6d,0xf8,0x29,0xed,0xf4,0xe7,0x5b,0xc5,0xbd,0x97,0x08,0xb1,0x3a,0x66,0x79,0xd2,0xba,0x4c,0xcd,0x1f,0xd7,0xa0,0x24,0x90,0xd1,0x80,0xf8,0x8a,0x28,0xfb,0x0a,0xc2,0x25,0xc5,0x19,0x64,0x3a,0x5f,0x4b,0x97,0xa3,0xb1,0x33,0x72,0x00,0xe2,0xef,0xbc,0x7f,0x7d},
+ {0x01,0x28,0x6b,0x26,0x6a,0x1e,0xef,0xfa,0x16,0x9f,0x73,0xd5,0xc4,0x68,0x6c,0x86,0x2c,0x76,0x03,0x1b,0xbc,0x2f,0x8a,0xf6,0x8d,0x5a,0xb7,0x87,0x5e,0x43,0x75,0x59,0x94,0x90,0xc2,0xf3,0xc5,0x5d,0x7c,0xcd,0xab,0x05,0x91,0x2a,0x9a,0xa2,0x81,0xc7,0x58,0x30,0x1c,0x42,0x36,0x1d,0xc6,0x80,0xd7,0xd4,0xd8,0xdc,0x96,0xd1,0x9c,0x4f,0x68,0x37,0x7b,0x6a,0xd8,0x97,0x92,0x19,0x63,0x7a,0xd1,0x1a,0x24,0x58,0xd0,0xd0,0x17,0x0c,0x1c,0x5c,0xad,0x9c,0x02,0xba,0x07,0x03,0x7a,0x38,0x84,0xd0,0xcd,0x7c},
+ {0x17,0x04,0x26,0x6d,0x2c,0x42,0xa6,0xdc,0xbd,0x40,0x82,0x94,0x50,0x3d,0x15,0xae,0x77,0xc6,0x68,0xfb,0xb4,0xc1,0xc0,0xa9,0x53,0xcf,0xd0,0x61,0xed,0xd0,0x8b,0x42,0x93,0xcc,0x60,0x67,0x18,0x84,0x0c,0x9b,0x99,0x2a,0xb3,0x1a,0x7a,0x00,0xae,0xcd,0x18,0xda,0x0b,0x62,0x86,0xec,0x8d,0xa8,0x44,0xca,0x90,0x81,0x84,0xca,0x93,0x35,0xa7,0x9a,0x84,0x5e,0x9a,0x18,0x13,0x92,0xcd,0xfa,0xd8,0x65,0x35,0xc3,0xd8,0xd4,0xd1,0xbb,0xfd,0x53,0x5b,0x54,0x52,0x8c,0xe6,0x63,0x2d,0xda,0x08,0x83,0x39,0x27},
+ {0x13,0xd4,0x5e,0x43,0x28,0x8d,0xc3,0x42,0xc9,0xcc,0x78,0x32,0x60,0xf3,0x50,0xbd,0xef,0x03,0xda,0x79,0x1a,0xab,0x07,0xbb,0x55,0x33,0x8c,0xbe,0xae,0x97,0x95,0x26,0x53,0x24,0x70,0x0a,0x4c,0x0e,0xa1,0xb9,0xde,0x1b,0x7d,0xd5,0x66,0x58,0xa2,0x0f,0xf7,0xda,0x27,0xcd,0xb5,0xd9,0xb9,0xff,0xfd,0x33,0x2c,0x49,0x45,0x29,0x2c,0x57,0xbe,0x30,0xcd,0xd6,0x45,0xc7,0x7f,0xc7,0xfb,0xae,0xba,0xe3,0xd3,0xe8,0xdf,0xe4,0x0c,0xda,0x5d,0xaa,0x30,0x88,0x2c,0xa2,0x80,0xca,0x5b,0xc0,0x98,0x54,0x98,0x7f},
+ {0x17,0xe1,0x0b,0x9f,0x88,0xce,0x49,0x38,0x88,0xa2,0x54,0x7b,0x1b,0xad,0x05,0x80,0x1c,0x92,0xfc,0x23,0x9f,0xc3,0xa3,0x3d,0x04,0xf3,0x31,0x0a,0x47,0xec,0xc2,0x76,0x63,0x63,0xbf,0x0f,0x52,0x15,0x56,0xd3,0xa6,0xfb,0x4d,0xcf,0x45,0x5a,0x04,0x08,0xc2,0xa0,0x3f,0x87,0xbc,0x4f,0xc2,0xee,0xe7,0x12,0x9b,0xd6,0x3c,0x65,0xf2,0x30,0x85,0x0c,0xc1,0xaa,0x38,0xc9,0x08,0x8a,0xcb,0x6b,0x27,0xdb,0x60,0x9b,0x17,0x46,0x70,0xac,0x6f,0x0e,0x1e,0xc0,0x20,0xa9,0xda,0x73,0x64,0x59,0xf1,0x73,0x12,0x2f},
+ {0x11,0x1e,0xe0,0x8a,0x7c,0xfc,0x39,0x47,0x9f,0xab,0x6a,0x4a,0x90,0x74,0x52,0xfd,0x2e,0x8f,0x72,0x87,0x82,0x8a,0xd9,0x41,0xf2,0x69,0x5b,0xd8,0x2a,0x57,0x9e,0x5d,0xc0,0x0b,0xa7,0x55,0xd7,0x8b,0x48,0x30,0xe7,0x42,0xd4,0xf1,0xa4,0xb5,0xd6,0x06,0x62,0x61,0x59,0xbc,0x9e,0xa6,0xd1,0xea,0x84,0xf7,0xc5,0xed,0x97,0x19,0xac,0x38,0x3b,0xb1,0x51,0xa7,0x17,0xb5,0x66,0x06,0x8c,0x85,0x9b,0x7e,0x86,0x06,0x7d,0x74,0x49,0xde,0x4d,0x45,0x11,0xc0,0xac,0xac,0x9c,0xe6,0xe9,0xbf,0x9c,0xcd,0xdf,0x22},
+ {0xd9,0x0c,0x0d,0xc3,0xe0,0xd2,0xdb,0x8d,0x33,0x43,0xbb,0xac,0x5f,0x66,0x8e,0xad,0x1f,0x96,0x2a,0x32,0x8c,0x25,0x6b,0x8f,0xc7,0xc1,0x48,0x54,0xc0,0x16,0x29,0x6b,0xa1,0xe0,0x3b,0x10,0xb4,0x59,0xec,0x56,0x69,0xf9,0x59,0xd2,0xec,0xba,0xe3,0x2e,0x32,0xcd,0xf5,0x13,0x94,0xb2,0x7c,0x79,0x72,0xe4,0xcd,0x24,0x78,0x87,0xe9,0x0f,0x3b,0x91,0xba,0x0a,0xd1,0x34,0xdb,0x7e,0x0e,0xac,0x6d,0x2e,0x82,0xcd,0xa3,0x4e,0x15,0xf8,0x78,0x65,0xff,0x3d,0x08,0x66,0x17,0x0a,0xf0,0x7f,0x30,0x3f,0x30,0x4c},
+ {0x85,0x8c,0xb2,0x17,0xd6,0x3b,0x0a,0xd3,0xea,0x3b,0x77,0x39,0xb7,0x77,0xd3,0xc5,0xbf,0x5c,0x6a,0x1e,0x8c,0xe7,0xc6,0xc6,0xc4,0xb7,0x2a,0x8b,0xf7,0xb8,0x61,0x0d,0x00,0x45,0xd9,0x0d,0x58,0x03,0xfc,0x29,0x93,0xec,0xbb,0x6f,0xa4,0x7a,0xd2,0xec,0xf8,0xa7,0xe2,0xc2,0x5f,0x15,0x0a,0x13,0xd5,0xa1,0x06,0xb7,0x1a,0x15,0x6b,0x41,0xb0,0x36,0xc1,0xe9,0xef,0xd7,0xa8,0x56,0x20,0x4b,0xe4,0x58,0xcd,0xe5,0x07,0xbd,0xab,0xe0,0x57,0x1b,0xda,0x2f,0xe6,0xaf,0xd2,0xe8,0x77,0x42,0xf7,0x2a,0x1a,0x19},
+ {0x31,0x14,0x3c,0xc5,0x4b,0xf7,0x16,0xce,0xde,0xed,0x72,0x20,0xce,0x25,0x97,0x2b,0xe7,0x3e,0xb2,0xb5,0x6f,0xc3,0xb9,0xb8,0x08,0xc9,0x5c,0x0b,0x45,0x0e,0x2e,0x7e,0xfb,0x0e,0x46,0x4f,0x43,0x2b,0xe6,0x9f,0xd6,0x07,0x36,0xa6,0xd4,0x03,0xd3,0xde,0x24,0xda,0xa0,0xb7,0x0e,0x21,0x52,0xf0,0x93,0x5b,0x54,0x00,0xbe,0x7d,0x7e,0x23,0x30,0xb4,0x01,0x67,0xed,0x75,0x35,0x01,0x10,0xfd,0x0b,0x9f,0xe6,0x94,0x10,0x23,0x22,0x7f,0xe4,0x83,0x15,0x0f,0x32,0x75,0xe3,0x55,0x11,0xb1,0x99,0xa6,0xaf,0x71},
+ {0x1d,0xb6,0x53,0x39,0x9b,0x6f,0xce,0x65,0xe6,0x41,0xa1,0xaf,0xea,0x39,0x58,0xc6,0xfe,0x59,0xf7,0xa9,0xfd,0x5f,0x43,0x0f,0x8e,0xc2,0xb1,0xc2,0xe9,0x42,0x11,0x02,0xd6,0x50,0x3b,0x47,0x1c,0x3c,0x42,0xea,0x10,0xef,0x38,0x3b,0x1f,0x7a,0xe8,0x51,0x95,0xbe,0xc9,0xb2,0x5f,0xbf,0x84,0x9b,0x1c,0x9a,0xf8,0x78,0xbc,0x1f,0x73,0x00,0x80,0x18,0xf8,0x48,0x18,0xc7,0x30,0xe4,0x19,0xc1,0xce,0x5e,0x22,0x0c,0x96,0xbf,0xe3,0x15,0xba,0x6b,0x83,0xe0,0xda,0xb6,0x08,0x58,0xe1,0x47,0x33,0x6f,0x4d,0x4c},
+ {0xc9,0x1f,0x7d,0xc1,0xcf,0xec,0xf7,0x18,0x14,0x3c,0x40,0x51,0xa6,0xf5,0x75,0x6c,0xdf,0x0c,0xee,0xf7,0x2b,0x71,0xde,0xdb,0x22,0x7a,0xe4,0xa7,0xaa,0xdd,0x3f,0x19,0x70,0x19,0x8f,0x98,0xfc,0xdd,0x0c,0x2f,0x1b,0xf5,0xb9,0xb0,0x27,0x62,0x91,0x6b,0xbe,0x76,0x91,0x77,0xc4,0xb6,0xc7,0x6e,0xa8,0x9f,0x8f,0xa8,0x00,0x95,0xbf,0x38,0x6f,0x87,0xe8,0x37,0x3c,0xc9,0xd2,0x1f,0x2c,0x46,0xd1,0x18,0x5a,0x1e,0xf6,0xa2,0x76,0x12,0x24,0x39,0x82,0xf5,0x80,0x50,0x69,0x49,0x0d,0xbf,0x9e,0xb9,0x6f,0x6a},
+ {0xeb,0x55,0x08,0x56,0xbb,0xc1,0x46,0x6a,0x9d,0xf0,0x93,0xf8,0x38,0xbb,0x16,0x24,0xc1,0xac,0x71,0x8f,0x37,0x11,0x1d,0xd7,0xea,0x96,0x18,0xa3,0x14,0x69,0xf7,0x75,0xc6,0x23,0xe4,0xb6,0xb5,0x22,0xb1,0xee,0x8e,0xff,0x86,0xf2,0x10,0x70,0x9d,0x93,0x8c,0x5d,0xcf,0x1d,0x83,0x2a,0xa9,0x90,0x10,0xeb,0xc5,0x42,0x9f,0xda,0x6f,0x13,0xd1,0xbd,0x05,0xa3,0xb1,0xdf,0x4c,0xf9,0x08,0x2c,0xf8,0x9f,0x9d,0x4b,0x36,0x0f,0x8a,0x58,0xbb,0xc3,0xa5,0xd8,0x87,0x2a,0xba,0xdc,0xe8,0x0b,0x51,0x83,0x21,0x02},
+ {0x14,0x2d,0xad,0x5e,0x38,0x66,0xf7,0x4a,0x30,0x58,0x7c,0xca,0x80,0xd8,0x8e,0xa0,0x3d,0x1e,0x21,0x10,0xe6,0xa6,0x13,0x0d,0x03,0x6c,0x80,0x7b,0xe1,0x1c,0x07,0x6a,0x7f,0x7a,0x30,0x43,0x01,0x71,0x5a,0x9d,0x5f,0xa4,0x7d,0xc4,0x9e,0xde,0x63,0xb0,0xd3,0x7a,0x92,0xbe,0x52,0xfe,0xbb,0x22,0x6c,0x42,0x40,0xfd,0x41,0xc4,0x87,0x13,0xf8,0x8a,0x97,0x87,0xd1,0xc3,0xd3,0xb5,0x13,0x44,0x0e,0x7f,0x3d,0x5a,0x2b,0x72,0xa0,0x7c,0x47,0xbb,0x48,0x48,0x7b,0x0d,0x92,0xdc,0x1e,0xaf,0x6a,0xb2,0x71,0x31},
+ {0xa8,0x4c,0x56,0x97,0x90,0x31,0x2f,0xa9,0x19,0xe1,0x75,0x22,0x4c,0xb8,0x7b,0xff,0x50,0x51,0x87,0xa4,0x37,0xfe,0x55,0x4f,0x5a,0x83,0xf0,0x3c,0x87,0xd4,0x1f,0x22,0xd1,0x47,0x8a,0xb2,0xd8,0xb7,0x0d,0xa6,0xf1,0xa4,0x70,0x17,0xd6,0x14,0xbf,0xa6,0x58,0xbd,0xdd,0x53,0x93,0xf8,0xa1,0xd4,0xe9,0x43,0x42,0x34,0x63,0x4a,0x51,0x6c,0x41,0x63,0x15,0x3a,0x4f,0x20,0x22,0x23,0x2d,0x03,0x0a,0xba,0xe9,0xe0,0x73,0xfb,0x0e,0x03,0x0f,0x41,0x4c,0xdd,0xe0,0xfc,0xaa,0x4a,0x92,0xfb,0x96,0xa5,0xda,0x48},
+ {0xc7,0x9c,0xa5,0x5c,0x66,0x8e,0xca,0x6e,0xa0,0xac,0x38,0x2e,0x4b,0x25,0x47,0xa8,0xce,0x17,0x1e,0xd2,0x08,0xc7,0xaf,0x31,0xf7,0x4a,0xd8,0xca,0xfc,0xd6,0x6d,0x67,0x93,0x97,0x4c,0xc8,0x5d,0x1d,0xf6,0x14,0x06,0x82,0x41,0xef,0xe3,0xf9,0x41,0x99,0xac,0x77,0x62,0x34,0x8f,0xb8,0xf5,0xcd,0xa9,0x79,0x8a,0x0e,0xfa,0x37,0xc8,0x58,0x58,0x90,0xfc,0x96,0x85,0x68,0xf9,0x0c,0x1b,0xa0,0x56,0x7b,0xf3,0xbb,0xdc,0x1d,0x6a,0xd6,0x35,0x49,0x7d,0xe7,0xc2,0xdc,0x0a,0x7f,0xa5,0xc6,0xf2,0x73,0x4f,0x1c},
+ {0xbb,0xa0,0x5f,0x30,0xbd,0x4f,0x7a,0x0e,0xad,0x63,0xc6,0x54,0xe0,0x4c,0x9d,0x82,0x48,0x38,0xe3,0x2f,0x83,0xc3,0x21,0xf4,0x42,0x4c,0xf6,0x1b,0x0d,0xc8,0x5a,0x79,0x84,0x34,0x7c,0xfc,0x6e,0x70,0x6e,0xb3,0x61,0xcf,0xc1,0xc3,0xb4,0xc9,0xdf,0x73,0xe5,0xc7,0x1c,0x78,0xc9,0x79,0x1d,0xeb,0x5c,0x67,0xaf,0x7d,0xdb,0x9a,0x45,0x70,0xb3,0x2b,0xb4,0x91,0x49,0xdb,0x91,0x1b,0xca,0xdc,0x02,0x4b,0x23,0x96,0x26,0x57,0xdc,0x78,0x8c,0x1f,0xe5,0x9e,0xdf,0x9f,0xd3,0x1f,0xe2,0x8c,0x84,0x62,0xe1,0x5f},
+ {0x1a,0x96,0x94,0xe1,0x4f,0x21,0x59,0x4e,0x4f,0xcd,0x71,0x0d,0xc7,0x7d,0xbe,0x49,0x2d,0xf2,0x50,0x3b,0xd2,0xcf,0x00,0x93,0x32,0x72,0x91,0xfc,0x46,0xd4,0x89,0x47,0x08,0xb2,0x7c,0x5d,0x2d,0x85,0x79,0x28,0xe7,0xf2,0x7d,0x68,0x70,0xdd,0xde,0xb8,0x91,0x78,0x68,0x21,0xab,0xff,0x0b,0xdc,0x35,0xaa,0x7d,0x67,0x43,0xc0,0x44,0x2b,0x8e,0xb7,0x4e,0x07,0xab,0x87,0x1c,0x1a,0x67,0xf4,0xda,0x99,0x8e,0xd1,0xc6,0xfa,0x67,0x90,0x4f,0x48,0xcd,0xbb,0xac,0x3e,0xe4,0xa4,0xb9,0x2b,0xef,0x2e,0xc5,0x60},
+ {0xf1,0x8b,0xfd,0x3b,0xbc,0x89,0x5d,0x0b,0x1a,0x55,0xf3,0xc9,0x37,0x92,0x6b,0xb0,0xf5,0x28,0x30,0xd5,0xb0,0x16,0x4c,0x0e,0xab,0xca,0xcf,0x2c,0x31,0x9c,0xbc,0x10,0x11,0x6d,0xae,0x7c,0xc2,0xc5,0x2b,0x70,0xab,0x8c,0xa4,0x54,0x9b,0x69,0xc7,0x44,0xb2,0x2e,0x49,0xba,0x56,0x40,0xbc,0xef,0x6d,0x67,0xb6,0xd9,0x48,0x72,0xd7,0x70,0x5b,0xa0,0xc2,0x3e,0x4b,0xe8,0x8a,0xaa,0xe0,0x81,0x17,0xed,0xf4,0x9e,0x69,0x98,0xd1,0x85,0x8e,0x70,0xe4,0x13,0x45,0x79,0x13,0xf4,0x76,0xa9,0xd3,0x5b,0x75,0x63},
+ {0x53,0x08,0xd1,0x2a,0x3e,0xa0,0x5f,0xb5,0x69,0x35,0xe6,0x9e,0x90,0x75,0x6f,0x35,0x90,0xb8,0x69,0xbe,0xfd,0xf1,0xf9,0x9f,0x84,0x6f,0xc1,0x8b,0xc4,0xc1,0x8c,0x0d,0xb7,0xac,0xf1,0x97,0x18,0x10,0xc7,0x3d,0xd8,0xbb,0x65,0xc1,0x5e,0x7d,0xda,0x5d,0x0f,0x02,0xa1,0x0f,0x9c,0x5b,0x8e,0x50,0x56,0x2a,0xc5,0x37,0x17,0x75,0x63,0x27,0xa9,0x19,0xb4,0x6e,0xd3,0x02,0x94,0x02,0xa5,0x60,0xb4,0x77,0x7e,0x4e,0xb4,0xf0,0x56,0x49,0x3c,0xd4,0x30,0x62,0xa8,0xcf,0xe7,0x66,0xd1,0x7a,0x8a,0xdd,0xc2,0x70},
+ {0x0e,0xec,0x6f,0x9f,0x50,0x94,0x61,0x65,0x8d,0x51,0xc6,0x46,0xa9,0x7e,0x2e,0xee,0x5c,0x9b,0xe0,0x67,0xf3,0xc1,0x33,0x97,0x95,0x84,0x94,0x63,0x63,0xac,0x0f,0x2e,0x13,0x7e,0xed,0xb8,0x7d,0x96,0xd4,0x91,0x7a,0x81,0x76,0xd7,0x0a,0x2f,0x25,0x74,0x64,0x25,0x85,0x0d,0xe0,0x82,0x09,0xe4,0xe5,0x3c,0xa5,0x16,0x38,0x61,0xb8,0x32,0x64,0xcd,0x48,0xe4,0xbe,0xf7,0xe7,0x79,0xd0,0x86,0x78,0x08,0x67,0x3a,0xc8,0x6a,0x2e,0xdb,0xe4,0xa0,0xd9,0xd4,0x9f,0xf8,0x41,0x4f,0x5a,0x73,0x5c,0x21,0x79,0x41},
+ {0x2a,0xed,0xdc,0xd7,0xe7,0x94,0x70,0x8c,0x70,0x9c,0xd3,0x47,0xc3,0x8a,0xfb,0x97,0x02,0xd9,0x06,0xa9,0x33,0xe0,0x3b,0xe1,0x76,0x9d,0xd9,0x0c,0xa3,0x44,0x03,0x70,0x34,0xcd,0x6b,0x28,0xb9,0x33,0xae,0xe4,0xdc,0xd6,0x9d,0x55,0xb6,0x7e,0xef,0xb7,0x1f,0x8e,0xd3,0xb3,0x1f,0x14,0x8b,0x27,0x86,0xc2,0x41,0x22,0x66,0x85,0xfa,0x31,0xf4,0x22,0x36,0x2e,0x42,0x6c,0x82,0xaf,0x2d,0x50,0x33,0x98,0x87,0x29,0x20,0xc1,0x23,0x91,0x38,0x2b,0xe1,0xb7,0xc1,0x9b,0x89,0x24,0x95,0xa9,0x12,0x23,0xbb,0x24},
+ {0xc3,0x67,0xde,0x32,0x17,0xed,0xa8,0xb1,0x48,0x49,0x1b,0x46,0x18,0x94,0xb4,0x3c,0xd2,0xbc,0xcf,0x76,0x43,0x43,0xbd,0x8e,0x08,0x80,0x18,0x1e,0x87,0x3e,0xee,0x0f,0x6b,0x5c,0xf8,0xf5,0x2a,0x0c,0xf8,0x41,0x94,0x67,0xfa,0x04,0xc3,0x84,0x72,0x68,0xad,0x1b,0xba,0xa3,0x99,0xdf,0x45,0x89,0x16,0x5d,0xeb,0xff,0xf9,0x2a,0x1d,0x0d,0xdf,0x1e,0x62,0x32,0xa1,0x8a,0xda,0xa9,0x79,0x65,0x22,0x59,0xa1,0x22,0xb8,0x30,0x93,0xc1,0x9a,0xa7,0x7b,0x19,0x04,0x40,0x76,0x1d,0x53,0x18,0x97,0xd7,0xac,0x16},
+ {0x3d,0x1d,0x9b,0x2d,0xaf,0x72,0xdf,0x72,0x5a,0x24,0x32,0xa4,0x36,0x2a,0x46,0x63,0x37,0x96,0xb3,0x16,0x79,0xa0,0xce,0x3e,0x09,0x23,0x30,0xb9,0xf6,0x0e,0x3e,0x12,0xad,0xb6,0x87,0x78,0xc5,0xc6,0x59,0xc9,0xba,0xfe,0x90,0x5f,0xad,0x9e,0xe1,0x94,0x04,0xf5,0x42,0xa3,0x62,0x4e,0xe2,0x16,0x00,0x17,0x16,0x18,0x4b,0xd3,0x4e,0x16,0x9a,0xe6,0x2f,0x19,0x4c,0xd9,0x7e,0x48,0x13,0x15,0x91,0x3a,0xea,0x2c,0xae,0x61,0x27,0xde,0xa4,0xb9,0xd3,0xf6,0x7b,0x87,0xeb,0xf3,0x73,0x10,0xc6,0x0f,0xda,0x78},
+ {0x6a,0xc6,0x2b,0xe5,0x28,0x5d,0xf1,0x5b,0x8e,0x1a,0xf0,0x70,0x18,0xe3,0x47,0x2c,0xdd,0x8b,0xc2,0x06,0xbc,0xaf,0x19,0x24,0x3a,0x17,0x6b,0x25,0xeb,0xde,0x25,0x2d,0x94,0x3a,0x0c,0x68,0xf1,0x80,0x9f,0xa2,0xe6,0xe7,0xe9,0x1a,0x15,0x7e,0xf7,0x71,0x73,0x79,0x01,0x48,0x58,0xf1,0x00,0x11,0xdd,0x8d,0xb3,0x16,0xb3,0xa4,0x4a,0x05,0xb8,0x7c,0x26,0x19,0x8d,0x46,0xc8,0xdf,0xaf,0x4d,0xe5,0x66,0x9c,0x78,0x28,0x0b,0x17,0xec,0x6e,0x66,0x2a,0x1d,0xeb,0x2a,0x60,0xa7,0x7d,0xab,0xa6,0x10,0x46,0x13},
+ {0xfe,0xb0,0xf6,0x8d,0xc7,0x8e,0x13,0x51,0x1b,0xf5,0x75,0xe5,0x89,0xda,0x97,0x53,0xb9,0xf1,0x7a,0x71,0x1d,0x7a,0x20,0x09,0x50,0xd6,0x20,0x2b,0xba,0xfd,0x02,0x21,0x15,0xf5,0xd1,0x77,0xe7,0x65,0x2a,0xcd,0xf1,0x60,0xaa,0x8f,0x87,0x91,0x89,0x54,0xe5,0x06,0xbc,0xda,0xbc,0x3b,0xb7,0xb1,0xfb,0xc9,0x7c,0xa9,0xcb,0x78,0x48,0x65,0xa1,0xe6,0x5c,0x05,0x05,0xe4,0x9e,0x96,0x29,0xad,0x51,0x12,0x68,0xa7,0xbc,0x36,0x15,0xa4,0x7d,0xaa,0x17,0xf5,0x1a,0x3a,0xba,0xb2,0xec,0x29,0xdb,0x25,0xd7,0x0a},
+ {0x57,0x24,0x4e,0x83,0xb1,0x67,0x42,0xdc,0xc5,0x1b,0xce,0x70,0xb5,0x44,0x75,0xb6,0xd7,0x5e,0xd1,0xf7,0x0b,0x7a,0xf0,0x1a,0x50,0x36,0xa0,0x71,0xfb,0xcf,0xef,0x4a,0x85,0x6f,0x05,0x9b,0x0c,0xbc,0xc7,0xfe,0xd7,0xff,0xf5,0xe7,0x68,0x52,0x7d,0x53,0xfa,0xae,0x12,0x43,0x62,0xc6,0xaf,0x77,0xd9,0x9f,0x39,0x02,0x53,0x5f,0x67,0x4f,0x1e,0x17,0x15,0x04,0x36,0x36,0x2d,0xc3,0x3b,0x48,0x98,0x89,0x11,0xef,0x2b,0xcd,0x10,0x51,0x94,0xd0,0xad,0x6e,0x0a,0x87,0x61,0x65,0xa8,0xa2,0x72,0xbb,0xcc,0x0b},
+ {0xc8,0xa9,0xb1,0xea,0x2f,0x96,0x5e,0x18,0xcd,0x7d,0x14,0x65,0x35,0xe6,0xe7,0x86,0xf2,0x6d,0x5b,0xbb,0x31,0xe0,0x92,0xb0,0x3e,0xb7,0xd6,0x59,0xab,0xf0,0x24,0x40,0x96,0x12,0xfe,0x50,0x4c,0x5e,0x6d,0x18,0x7e,0x9f,0xe8,0xfe,0x82,0x7b,0x39,0xe0,0xb0,0x31,0x70,0x50,0xc5,0xf6,0xc7,0x3b,0xc2,0x37,0x8f,0x10,0x69,0xfd,0x78,0x66,0xc2,0x63,0x68,0x63,0x31,0xfa,0x86,0x15,0xf2,0x33,0x2d,0x57,0x48,0x8c,0xf6,0x07,0xfc,0xae,0x9e,0x78,0x9f,0xcc,0x73,0x4f,0x01,0x47,0xad,0x8e,0x10,0xe2,0x42,0x2d},
+ {0x9b,0xd2,0xdf,0x94,0x15,0x13,0xf5,0x97,0x6a,0x4c,0x3f,0x31,0x5d,0x98,0x55,0x61,0x10,0x50,0x45,0x08,0x07,0x3f,0xa1,0xeb,0x22,0xd3,0xd2,0xb8,0x08,0x26,0x6b,0x67,0x93,0x75,0x53,0x0f,0x0d,0x7b,0x71,0x21,0x4c,0x06,0x1e,0x13,0x0b,0x69,0x4e,0x91,0x9f,0xe0,0x2a,0x75,0xae,0x87,0xb6,0x1b,0x6e,0x3c,0x42,0x9b,0xa7,0xf3,0x0b,0x42,0x47,0x2b,0x5b,0x1c,0x65,0xba,0x38,0x81,0x80,0x1b,0x1b,0x31,0xec,0xb6,0x71,0x86,0xb0,0x35,0x31,0xbc,0xb1,0x0c,0xff,0x7b,0xe0,0xf1,0x0c,0x9c,0xfa,0x2f,0x5d,0x74},
+ {0xbd,0xc8,0xc9,0x2b,0x1e,0x5a,0x52,0xbf,0x81,0x9d,0x47,0x26,0x08,0x26,0x5b,0xea,0xdb,0x55,0x01,0xdf,0x0e,0xc7,0x11,0xd5,0xd0,0xf5,0x0c,0x96,0xeb,0x3c,0xe2,0x1a,0x6a,0x4e,0xd3,0x21,0x57,0xdf,0x36,0x60,0xd0,0xb3,0x7b,0x99,0x27,0x88,0xdb,0xb1,0xfa,0x6a,0x75,0xc8,0xc3,0x09,0xc2,0xd3,0x39,0xc8,0x1d,0x4c,0xe5,0x5b,0xe1,0x06,0x4a,0x99,0x32,0x19,0x87,0x5d,0x72,0x5b,0xb0,0xda,0xb1,0xce,0xb5,0x1c,0x35,0x32,0x05,0xca,0xb7,0xda,0x49,0x15,0xc4,0x7d,0xf7,0xc1,0x8e,0x27,0x61,0xd8,0xde,0x58},
+ {0x5c,0xc5,0x66,0xf2,0x93,0x37,0x17,0xd8,0x49,0x4e,0x45,0xcc,0xc5,0x76,0xc9,0xc8,0xa8,0xc3,0x26,0xbc,0xf8,0x82,0xe3,0x5c,0xf9,0xf6,0x85,0x54,0xe8,0x9d,0xf3,0x2f,0xa8,0xc9,0xc2,0xb6,0xa8,0x5b,0xfb,0x2d,0x8c,0x59,0x2c,0xf5,0x8e,0xef,0xee,0x48,0x73,0x15,0x2d,0xf1,0x07,0x91,0x80,0x33,0xd8,0x5b,0x1d,0x53,0x6b,0x69,0xba,0x08,0x7a,0xc5,0xef,0xc3,0xee,0x3e,0xed,0x77,0x11,0x48,0xff,0xd4,0x17,0x55,0xe0,0x04,0xcb,0x71,0xa6,0xf1,0x3f,0x7a,0x3d,0xea,0x54,0xfe,0x7c,0x94,0xb4,0x33,0x06,0x12},
+ {0x42,0x00,0x61,0x91,0x78,0x98,0x94,0x0b,0xe8,0xfa,0xeb,0xec,0x3c,0xb1,0xe7,0x4e,0xc0,0xa4,0xf0,0x94,0x95,0x73,0xbe,0x70,0x85,0x91,0xd5,0xb4,0x99,0x0a,0xd3,0x35,0x0a,0x10,0x12,0x49,0x47,0x31,0xbd,0x82,0x06,0xbe,0x6f,0x7e,0x6d,0x7b,0x23,0xde,0xc6,0x79,0xea,0x11,0x19,0x76,0x1e,0xe1,0xde,0x3b,0x39,0xcb,0xe3,0x3b,0x43,0x07,0xf4,0x97,0xe9,0x5c,0xc0,0x44,0x79,0xff,0xa3,0x51,0x5c,0xb0,0xe4,0x3d,0x5d,0x57,0x7c,0x84,0x76,0x5a,0xfd,0x81,0x33,0x58,0x9f,0xda,0xf6,0x7a,0xde,0x3e,0x87,0x2d},
+ {0x09,0x34,0x37,0x43,0x64,0x31,0x7a,0x15,0xd9,0x81,0xaa,0xf4,0xee,0xb7,0xb8,0xfa,0x06,0x48,0xa6,0xf5,0xe6,0xfe,0x93,0xb0,0xb6,0xa7,0x7f,0x70,0x54,0x36,0x77,0x2e,0x81,0xf9,0x5d,0x4e,0xe1,0x02,0x62,0xaa,0xf5,0xe1,0x15,0x50,0x17,0x59,0x0d,0xa2,0x6c,0x1d,0xe2,0xba,0xd3,0x75,0xa2,0x18,0x53,0x02,0x60,0x01,0x8a,0x61,0x43,0x05,0xc1,0x23,0x4c,0x97,0xf4,0xbd,0xea,0x0d,0x93,0x46,0xce,0x9d,0x25,0x0a,0x6f,0xaa,0x2c,0xba,0x9a,0xa2,0xb8,0x2c,0x20,0x04,0x0d,0x96,0x07,0x2d,0x36,0x43,0x14,0x4b},
+ {0x7a,0x1f,0x6e,0xb6,0xc7,0xb7,0xc4,0xcc,0x7e,0x2f,0x0c,0xf5,0x25,0x7e,0x15,0x44,0x1c,0xaf,0x3e,0x71,0xfc,0x6d,0xf0,0x3e,0xf7,0x63,0xda,0x52,0x67,0x44,0x2f,0x58,0xcb,0x9c,0x52,0x1c,0xe9,0x54,0x7c,0x96,0xfb,0x35,0xc6,0x64,0x92,0x26,0xf6,0x30,0x65,0x19,0x12,0x78,0xf4,0xaf,0x47,0x27,0x5c,0x6f,0xf6,0xea,0x18,0x84,0x03,0x17,0xe4,0x4c,0x32,0x20,0xd3,0x7b,0x31,0xc6,0xc4,0x8b,0x48,0xa4,0xe8,0x42,0x10,0xa8,0x64,0x13,0x5a,0x4e,0x8b,0xf1,0x1e,0xb2,0xc9,0x8d,0xa2,0xcd,0x4b,0x1c,0x2a,0x0c},
+ {0x47,0x04,0x1f,0x6f,0xd0,0xc7,0x4d,0xd2,0x59,0xc0,0x87,0xdb,0x3e,0x9e,0x26,0xb2,0x8f,0xd2,0xb2,0xfb,0x72,0x02,0x5b,0xd1,0x77,0x48,0xf6,0xc6,0xd1,0x8b,0x55,0x7c,0x45,0x69,0xbd,0x69,0x48,0x81,0xc4,0xed,0x22,0x8d,0x1c,0xbe,0x7d,0x90,0x6d,0x0d,0xab,0xc5,0x5c,0xd5,0x12,0xd2,0x3b,0xc6,0x83,0xdc,0x14,0xa3,0x30,0x9b,0x6a,0x5a,0x3d,0x46,0x96,0xd3,0x24,0x15,0xec,0xd0,0xf0,0x24,0x5a,0xc3,0x8a,0x62,0xbb,0x12,0xa4,0x5f,0xbc,0x1c,0x79,0x3a,0x0c,0xa5,0xc3,0xaf,0xfb,0x0a,0xca,0xa5,0x04,0x04},
+ {0xd6,0x43,0xa7,0x0a,0x07,0x40,0x1f,0x8c,0xe8,0x5e,0x26,0x5b,0xcb,0xd0,0xba,0xcc,0xde,0xd2,0x8f,0x66,0x6b,0x04,0x4b,0x57,0x33,0x96,0xdd,0xca,0xfd,0x5b,0x39,0x46,0xd1,0x6f,0x41,0x2a,0x1b,0x9e,0xbc,0x62,0x8b,0x59,0x50,0xe3,0x28,0xf7,0xc6,0xb5,0x67,0x69,0x5d,0x3d,0xd8,0x3f,0x34,0x04,0x98,0xee,0xf8,0xe7,0x16,0x75,0x52,0x39,0x9c,0x9a,0x5d,0x1a,0x2d,0xdb,0x7f,0x11,0x2a,0x5c,0x00,0xd1,0xbc,0x45,0x77,0x9c,0xea,0x6f,0xd5,0x54,0xf1,0xbe,0xd4,0xef,0x16,0xd0,0x22,0xe8,0x29,0x9a,0x57,0x76},
+ {0x17,0x2a,0xc0,0x49,0x7e,0x8e,0xb6,0x45,0x7f,0xa3,0xa9,0xbc,0xa2,0x51,0xcd,0x23,0x1b,0x4c,0x22,0xec,0x11,0x5f,0xd6,0x3e,0xb1,0xbd,0x05,0x9e,0xdc,0x84,0xa3,0x43,0xf2,0x34,0xb4,0x52,0x13,0xb5,0x3c,0x33,0xe1,0x80,0xde,0x93,0x49,0x28,0x32,0xd8,0xce,0x35,0x0d,0x75,0x87,0x28,0x51,0xb5,0xc1,0x77,0x27,0x2a,0xbb,0x14,0xc5,0x02,0x45,0xb6,0xf1,0x8b,0xda,0xd5,0x4b,0x68,0x53,0x4b,0xb5,0xf6,0x7e,0xd3,0x8b,0xfb,0x53,0xd2,0xb0,0xa9,0xd7,0x16,0x39,0x31,0x59,0x80,0x54,0x61,0x09,0x92,0x60,0x11},
+ {0xaa,0xcf,0xda,0x29,0x69,0x16,0x4d,0xb4,0x8f,0x59,0x13,0x84,0x4c,0x9f,0x52,0xda,0x59,0x55,0x3d,0x45,0xca,0x63,0xef,0xe9,0x0b,0x8e,0x69,0xc5,0x5b,0x12,0x1e,0x35,0xcd,0x4d,0x9b,0x36,0x16,0x56,0x38,0x7a,0x63,0x35,0x5c,0x65,0xa7,0x2c,0xc0,0x75,0x21,0x80,0xf1,0xd4,0xf9,0x1b,0xc2,0x7d,0x42,0xe0,0xe6,0x91,0x74,0x7d,0x63,0x2f,0xbe,0x7b,0xf6,0x1a,0x46,0x9b,0xb4,0xd4,0x61,0x89,0xab,0xc8,0x7a,0x03,0x03,0xd6,0xfb,0x99,0xa6,0xf9,0x9f,0xe1,0xde,0x71,0x9a,0x2a,0xce,0xe7,0x06,0x2d,0x18,0x7f},
+ {0xec,0x68,0x01,0xab,0x64,0x8e,0x7c,0x7a,0x43,0xc5,0xed,0x15,0x55,0x4a,0x5a,0xcb,0xda,0x0e,0xcd,0x47,0xd3,0x19,0x55,0x09,0xb0,0x93,0x3e,0x34,0x8c,0xac,0xd4,0x67,0x22,0x75,0x21,0x8e,0x72,0x4b,0x45,0x09,0xd8,0xb8,0x84,0xd4,0xf4,0xe8,0x58,0xaa,0x3c,0x90,0x46,0x7f,0x4d,0x25,0x58,0xd3,0x17,0x52,0x1c,0x24,0x43,0xc0,0xac,0x44,0x77,0x57,0x7a,0x4f,0xbb,0x6b,0x7d,0x1c,0xe1,0x13,0x83,0x91,0xd4,0xfe,0x35,0x8b,0x84,0x46,0x6b,0xc9,0xc6,0xa1,0xdc,0x4a,0xbd,0x71,0xad,0x12,0x83,0x1c,0x6d,0x55},
+ {0x82,0x39,0x8d,0x0c,0xe3,0x40,0xef,0x17,0x34,0xfa,0xa3,0x15,0x3e,0x07,0xf7,0x31,0x6e,0x64,0x73,0x07,0xcb,0xf3,0x21,0x4f,0xff,0x4e,0x82,0x1d,0x6d,0x6c,0x6c,0x74,0x21,0xe8,0x1b,0xb1,0x56,0x67,0xf0,0x81,0xdd,0xf3,0xa3,0x10,0x23,0xf8,0xaf,0x0f,0x5d,0x46,0x99,0x6a,0x55,0xd0,0xb2,0xf8,0x05,0x7f,0x8c,0xcc,0x38,0xbe,0x7a,0x09,0xa4,0x2d,0xa5,0x7e,0x87,0xc9,0x49,0x0c,0x43,0x1d,0xdc,0x9b,0x55,0x69,0x43,0x4c,0xd2,0xeb,0xcc,0xf7,0x09,0x38,0x2c,0x02,0xbd,0x84,0xee,0x4b,0xa3,0x14,0x7e,0x57},
+ {0x0a,0x3b,0xa7,0x61,0xac,0x68,0xe2,0xf0,0xf5,0xa5,0x91,0x37,0x10,0xfa,0xfa,0xf2,0xe9,0x00,0x6d,0x6b,0x82,0x3e,0xe1,0xc1,0x42,0x8f,0xd7,0x6f,0xe9,0x7e,0xfa,0x60,0x2b,0xd7,0x4d,0xbd,0xbe,0xce,0xfe,0x94,0x11,0x22,0x0f,0x06,0xda,0x4f,0x6a,0xf4,0xff,0xd1,0xc8,0xc0,0x77,0x59,0x4a,0x12,0x95,0x92,0x00,0xfb,0xb8,0x04,0x53,0x70,0xc6,0x6e,0x29,0x4d,0x35,0x1d,0x3d,0xb6,0xd8,0x31,0xad,0x5f,0x3e,0x05,0xc3,0xf3,0xec,0x42,0xbd,0xb4,0x8c,0x95,0x0b,0x67,0xfd,0x53,0x63,0xa1,0x0c,0x8e,0x39,0x21},
+ {0xf3,0x33,0x2b,0x38,0x8a,0x05,0xf5,0x89,0xb4,0xc0,0x48,0xad,0x0b,0xba,0xe2,0x5a,0x6e,0xb3,0x3d,0xa5,0x03,0xb5,0x93,0x8f,0xe6,0x32,0xa2,0x95,0x9d,0xed,0xa3,0x5a,0x01,0x56,0xb7,0xb4,0xf9,0xaa,0x98,0x27,0x72,0xad,0x8d,0x5c,0x13,0x72,0xac,0x5e,0x23,0xa0,0xb7,0x61,0x61,0xaa,0xce,0xd2,0x4e,0x7d,0x8f,0xe9,0x84,0xb2,0xbf,0x1b,0x61,0x65,0xd9,0xc7,0xe9,0x77,0x67,0x65,0x36,0x80,0xc7,0x72,0x54,0x12,0x2b,0xcb,0xee,0x6e,0x50,0xd9,0x99,0x32,0x05,0x65,0xcc,0x57,0x89,0x5e,0x4e,0xe1,0x07,0x4a},
+ {0x99,0xf9,0x0d,0x98,0xcb,0x12,0xe4,0x4e,0x71,0xc7,0x6e,0x3c,0x6f,0xd7,0x15,0xa3,0xfd,0x77,0x5c,0x92,0xde,0xed,0xa5,0xbb,0x02,0x34,0x31,0x1d,0x39,0xac,0x0b,0x3f,0x9b,0xa4,0x77,0xc4,0xcd,0x58,0x0b,0x24,0x17,0xf0,0x47,0x64,0xde,0xda,0x38,0xfd,0xad,0x6a,0xc8,0xa7,0x32,0x8d,0x92,0x19,0x81,0xa0,0xaf,0x84,0xed,0x7a,0xaf,0x50,0xe5,0x5b,0xf6,0x15,0x01,0xde,0x4f,0x6e,0xb2,0x09,0x61,0x21,0x21,0x26,0x98,0x29,0xd9,0xd6,0xad,0x0b,0x81,0x05,0x02,0x78,0x06,0xd0,0xeb,0xba,0x16,0xa3,0x21,0x19},
+ {0xfc,0x70,0xb8,0xdf,0x7e,0x2f,0x42,0x89,0xbd,0xb3,0x76,0x4f,0xeb,0x6b,0x29,0x2c,0xf7,0x4d,0xc2,0x36,0xd4,0xf1,0x38,0x07,0xb0,0xae,0x73,0xe2,0x41,0xdf,0x58,0x64,0x8b,0xc1,0xf3,0xd9,0x9a,0xad,0x5a,0xd7,0x9c,0xc1,0xb1,0x60,0xef,0x0e,0x6a,0x56,0xd9,0x0e,0x5c,0x25,0xac,0x0b,0x9a,0x3e,0xf5,0xc7,0x62,0xa0,0xec,0x9d,0x04,0x7b,0x83,0x44,0x44,0x35,0x7a,0xe3,0xcb,0xdc,0x93,0xbe,0xed,0x0f,0x33,0x79,0x88,0x75,0x87,0xdd,0xc5,0x12,0xc3,0x04,0x60,0x78,0x64,0x0e,0x95,0xc2,0xcb,0xdc,0x93,0x60},
+ {0x6d,0x70,0xe0,0x85,0x85,0x9a,0xf3,0x1f,0x33,0x39,0xe7,0xb3,0xd8,0xa5,0xd0,0x36,0x3b,0x45,0x8f,0x71,0xe1,0xf2,0xb9,0x43,0x7c,0xa9,0x27,0x48,0x08,0xea,0xd1,0x57,0x4b,0x03,0x84,0x60,0xbe,0xee,0xde,0x6b,0x54,0xb8,0x0f,0x78,0xb6,0xc2,0x99,0x31,0x95,0x06,0x2d,0xb6,0xab,0x76,0x33,0x97,0x90,0x7d,0x64,0x8b,0xc9,0x80,0x31,0x6e,0x71,0xb0,0x28,0xa1,0xe7,0xb6,0x7a,0xee,0xaa,0x8b,0xa8,0x93,0x6d,0x59,0xc1,0xa4,0x30,0x61,0x21,0xb2,0x82,0xde,0xb4,0xf7,0x18,0xbd,0x97,0xdd,0x9d,0x99,0x3e,0x36},
+ {0xc4,0x1f,0xee,0x35,0xc1,0x43,0xa8,0x96,0xcf,0xc8,0xe4,0x08,0x55,0xb3,0x6e,0x97,0x30,0xd3,0x8c,0xb5,0x01,0x68,0x2f,0xb4,0x2b,0x05,0x3a,0x69,0x78,0x9b,0xee,0x48,0xc6,0xae,0x4b,0xe2,0xdc,0x48,0x18,0x2f,0x60,0xaf,0xbc,0xba,0x55,0x72,0x9b,0x76,0x31,0xe9,0xef,0x3c,0x6e,0x3c,0xcb,0x90,0x55,0xb3,0xf9,0xc6,0x9b,0x97,0x1f,0x23,0xc6,0xf3,0x2a,0xcc,0x4b,0xde,0x31,0x5c,0x1f,0x8d,0x20,0xfe,0x30,0xb0,0x4b,0xb0,0x66,0xb4,0x4f,0xc1,0x09,0x70,0x8d,0xb7,0x13,0x24,0x79,0x08,0x9b,0xfa,0x9b,0x07},
+ {0xf4,0x0d,0x30,0xda,0x51,0x3a,0x90,0xe3,0xb0,0x5a,0xa9,0x3d,0x23,0x64,0x39,0x84,0x80,0x64,0x35,0x0b,0x2d,0xf1,0x3c,0xed,0x94,0x71,0x81,0x84,0xf6,0x77,0x8c,0x03,0x45,0x42,0xd5,0xa2,0x80,0xed,0xc9,0xf3,0x52,0x39,0xf6,0x77,0x78,0x8b,0xa0,0x0a,0x75,0x54,0x08,0xd1,0x63,0xac,0x6d,0xd7,0x6b,0x63,0x70,0x94,0x15,0xfb,0xf4,0x1e,0xec,0x7b,0x16,0x5b,0xe6,0x5e,0x4e,0x85,0xc2,0xcd,0xd0,0x96,0x42,0x0a,0x59,0x59,0x99,0x21,0x10,0x98,0x34,0xdf,0xb2,0x72,0x56,0xff,0x0b,0x4a,0x2a,0xe9,0x5e,0x57},
+ {0xcf,0x2f,0x18,0x8a,0x90,0x80,0xc0,0xd4,0xbd,0x9d,0x48,0x99,0xc2,0x70,0xe1,0x30,0xde,0x33,0xf7,0x52,0x57,0xbd,0xba,0x05,0x00,0xfd,0xd3,0x2c,0x11,0xe7,0xd4,0x43,0x01,0xd8,0xa4,0x0a,0x45,0xbc,0x46,0x5d,0xd8,0xb9,0x33,0xa5,0x27,0x12,0xaf,0xc3,0xc2,0x06,0x89,0x2b,0x26,0x3b,0x9e,0x38,0x1b,0x58,0x2f,0x38,0x7e,0x1e,0x0a,0x20,0xc5,0x3a,0xf9,0xea,0x67,0xb9,0x8d,0x51,0xc0,0x52,0x66,0x05,0x9b,0x98,0xbc,0x71,0xf5,0x97,0x71,0x56,0xd9,0x85,0x2b,0xfe,0x38,0x4e,0x1e,0x65,0x52,0xca,0x0e,0x05},
+ {0x9c,0x0c,0x3f,0x45,0xde,0x1a,0x43,0xc3,0x9b,0x3b,0x70,0xff,0x5e,0x04,0xf5,0xe9,0x3d,0x7b,0x84,0xed,0xc9,0x7a,0xd9,0xfc,0xc6,0xf4,0x58,0x1c,0xc2,0xe6,0x0e,0x4b,0xea,0x68,0xe6,0x60,0x76,0x39,0xac,0x97,0x97,0xb4,0x3a,0x15,0xfe,0xbb,0x19,0x9b,0x9f,0xa7,0xec,0x34,0xb5,0x79,0xb1,0x4c,0x57,0xae,0x31,0xa1,0x9f,0xc0,0x51,0x61,0x96,0x5d,0xf0,0xfd,0x0d,0x5c,0xf5,0x3a,0x7a,0xee,0xb4,0x2a,0xe0,0x2e,0x26,0xdd,0x09,0x17,0x17,0x12,0x87,0xbb,0xb2,0x11,0x0b,0x03,0x0f,0x80,0xfa,0x24,0xef,0x1f},
+ {0x96,0x31,0xa7,0x1a,0xfb,0x53,0xd6,0x37,0x18,0x64,0xd7,0x3f,0x30,0x95,0x94,0x0f,0xb2,0x17,0x3a,0xfb,0x09,0x0b,0x20,0xad,0x3e,0x61,0xc8,0x2f,0x29,0x49,0x4d,0x54,0x86,0x6b,0x97,0x30,0xf5,0xaf,0xd2,0x22,0x04,0x46,0xd2,0xc2,0x06,0xb8,0x90,0x8d,0xe5,0xba,0xe5,0x4d,0x6c,0x89,0xa1,0xdc,0x17,0x0c,0x34,0xc8,0xe6,0x5f,0x00,0x28,0x88,0x86,0x52,0x34,0x9f,0xba,0xef,0x6a,0xa1,0x7d,0x10,0x25,0x94,0xff,0x1b,0x5c,0x36,0x4b,0xd9,0x66,0xcd,0xbb,0x5b,0xf7,0xfa,0x6d,0x31,0x0f,0x93,0x72,0xe4,0x72},
+ {0x4f,0x08,0x81,0x97,0x8c,0x20,0x95,0x26,0xe1,0x0e,0x45,0x23,0x0b,0x2a,0x50,0xb1,0x02,0xde,0xef,0x03,0xa6,0xae,0x9d,0xfd,0x4c,0xa3,0x33,0x27,0x8c,0x2e,0x9d,0x5a,0x27,0x76,0x2a,0xd3,0x35,0xf6,0xf3,0x07,0xf0,0x66,0x65,0x5f,0x86,0x4d,0xaa,0x7a,0x50,0x44,0xd0,0x28,0x97,0xe7,0x85,0x3c,0x38,0x64,0xe0,0x0f,0x00,0x7f,0xee,0x1f,0xe5,0xf7,0xdb,0x03,0xda,0x05,0x53,0x76,0xbd,0xcd,0x34,0x14,0x49,0xf2,0xda,0xa4,0xec,0x88,0x4a,0xd2,0xcd,0xd5,0x4a,0x7b,0x43,0x05,0x04,0xee,0x51,0x40,0xf9,0x00},
+ {0xb2,0x30,0xd3,0xc3,0x23,0x6b,0x35,0x8d,0x06,0x1b,0x47,0xb0,0x9b,0x8b,0x1c,0xf2,0x3c,0xb8,0x42,0x6e,0x6c,0x31,0x6c,0xb3,0x0d,0xb1,0xea,0x8b,0x7e,0x9c,0xd7,0x07,0x53,0x97,0xaf,0x07,0xbb,0x93,0xef,0xd7,0xa7,0x66,0xb7,0x3d,0xcf,0xd0,0x3e,0x58,0xc5,0x1e,0x0b,0x6e,0xbf,0x98,0x69,0xce,0x52,0x04,0xd4,0x5d,0xd2,0xff,0xb7,0x47,0x12,0xdd,0x08,0xbc,0x9c,0xfb,0xfb,0x87,0x9b,0xc2,0xee,0xe1,0x3a,0x6b,0x06,0x8a,0xbf,0xc1,0x1f,0xdb,0x2b,0x24,0x57,0x0d,0xb6,0x4b,0xa6,0x5e,0xa3,0x20,0x35,0x1c},
+ {0x4a,0xa3,0xcb,0xbc,0xa6,0x53,0xd2,0x80,0x9b,0x21,0x38,0x38,0xa1,0xc3,0x61,0x3e,0x96,0xe3,0x82,0x98,0x01,0xb6,0xc3,0x90,0x6f,0xe6,0x0e,0x5d,0x77,0x05,0x3d,0x1c,0x59,0xc0,0x6b,0x21,0x40,0x6f,0xa8,0xcd,0x7e,0xd8,0xbc,0x12,0x1d,0x23,0xbb,0x1f,0x90,0x09,0xc7,0x17,0x9e,0x6a,0x95,0xb4,0x55,0x2e,0xd1,0x66,0x3b,0x0c,0x75,0x38,0x1a,0xe5,0x22,0x94,0x40,0xf1,0x2e,0x69,0x71,0xf6,0x5d,0x2b,0x3c,0xc7,0xc0,0xcb,0x29,0xe0,0x4c,0x74,0xe7,0x4f,0x01,0x21,0x7c,0x48,0x30,0xd3,0xc7,0xe2,0x21,0x06},
+ {0x8d,0x83,0x59,0x82,0xcc,0x60,0x98,0xaf,0xdc,0x9a,0x9f,0xc6,0xc1,0x48,0xea,0x90,0x30,0x1e,0x58,0x65,0x37,0x48,0x26,0x65,0xbc,0xa5,0xd3,0x7b,0x09,0xd6,0x07,0x00,0xf3,0xf0,0xdb,0xb0,0x96,0x17,0xae,0xb7,0x96,0xe1,0x7c,0xe1,0xb9,0xaf,0xdf,0x54,0xb4,0xa3,0xaa,0xe9,0x71,0x30,0x92,0x25,0x9d,0x2e,0x00,0xa1,0x9c,0x58,0x8e,0x5d,0x4b,0xa9,0x42,0x08,0x95,0x1d,0xbf,0xc0,0x3e,0x2e,0x8f,0x58,0x63,0xc3,0xd3,0xb2,0xef,0xe2,0x51,0xbb,0x38,0x14,0x96,0x0a,0x86,0xbf,0x1c,0x3c,0x78,0xd7,0x83,0x15},
+ {0xe1,0x7a,0xa2,0x5d,0xef,0xa2,0xee,0xec,0x74,0x01,0x67,0x55,0x14,0x3a,0x7c,0x59,0x7a,0x16,0x09,0x66,0x12,0x2a,0xa6,0xc9,0x70,0x8f,0xed,0x81,0x2e,0x5f,0x2a,0x25,0xc7,0x28,0x9d,0xcc,0x04,0x47,0x03,0x90,0x8f,0xc5,0x2c,0xf7,0x9e,0x67,0x1b,0x1d,0x26,0x87,0x5b,0xbe,0x5f,0x2b,0xe1,0x16,0x0a,0x58,0xc5,0x83,0x4e,0x06,0x58,0x49,0x0d,0xe8,0x66,0x50,0x26,0x94,0x28,0x0d,0x6b,0x8c,0x7c,0x30,0x85,0xf7,0xc3,0xfc,0xfd,0x12,0x11,0x0c,0x78,0xda,0x53,0x1b,0x88,0xb3,0x43,0xd8,0x0b,0x17,0x9c,0x07},
+ {0xff,0x6f,0xfa,0x64,0xe4,0xec,0x06,0x05,0x23,0xe5,0x05,0x62,0x1e,0x43,0xe3,0xbe,0x42,0xea,0xb8,0x51,0x24,0x42,0x79,0x35,0x00,0xfb,0xc9,0x4a,0xe3,0x05,0xec,0x6d,0x56,0xd0,0xd5,0xc0,0x50,0xcd,0xd6,0xcd,0x3b,0x57,0x03,0xbb,0x6d,0x68,0xf7,0x9a,0x48,0xef,0xc3,0xf3,0x3f,0x72,0xa6,0x3c,0xcc,0x8a,0x7b,0x31,0xd7,0xc0,0x68,0x67,0xb3,0xc1,0x55,0xf1,0xe5,0x25,0xb6,0x94,0x91,0x7b,0x7b,0x99,0xa7,0xf3,0x7b,0x41,0x00,0x26,0x6b,0x6d,0xdc,0xbd,0x2c,0xc2,0xf4,0x52,0xcd,0xdd,0x14,0x5e,0x44,0x51},
+ {0x51,0x49,0x14,0x3b,0x4b,0x2b,0x50,0x57,0xb3,0xbc,0x4b,0x44,0x6b,0xff,0x67,0x8e,0xdb,0x85,0x63,0x16,0x27,0x69,0xbd,0xb8,0xc8,0x95,0x92,0xe3,0x31,0x6f,0x18,0x13,0x55,0xa4,0xbe,0x2b,0xab,0x47,0x31,0x89,0x29,0x91,0x07,0x92,0x4f,0xa2,0x53,0x8c,0xa7,0xf7,0x30,0xbe,0x48,0xf9,0x49,0x4b,0x3d,0xd4,0x4f,0x6e,0x08,0x90,0xe9,0x12,0x2e,0xbb,0xdf,0x7f,0xb3,0x96,0x0c,0xf1,0xf9,0xea,0x1c,0x12,0x5e,0x93,0x9a,0x9f,0x3f,0x98,0x5b,0x3a,0xc4,0x36,0x11,0xdf,0xaf,0x99,0x3e,0x5d,0xf0,0xe3,0xb2,0x77},
+ {0xde,0xc4,0x2e,0x9c,0xc5,0xa9,0x6f,0x29,0xcb,0xf3,0x84,0x4f,0xbf,0x61,0x8b,0xbc,0x08,0xf9,0xa8,0x17,0xd9,0x06,0x77,0x1c,0x5d,0x25,0xd3,0x7a,0xfc,0x95,0xb7,0x63,0xa4,0xb0,0xdd,0x12,0x9c,0x63,0x98,0xd5,0x6b,0x86,0x24,0xc0,0x30,0x9f,0xd1,0xa5,0x60,0xe4,0xfc,0x58,0x03,0x2f,0x7c,0xd1,0x8a,0x5e,0x09,0x2e,0x15,0x95,0xa1,0x07,0xc8,0x5f,0x9e,0x38,0x02,0x8f,0x36,0xa8,0x3b,0xe4,0x8d,0xcf,0x02,0x3b,0x43,0x90,0x43,0x26,0x41,0xc5,0x5d,0xfd,0xa1,0xaf,0x37,0x01,0x2f,0x03,0x3d,0xe8,0x8f,0x3e},
+ {0x94,0xa2,0x70,0x05,0xb9,0x15,0x8b,0x2f,0x49,0x45,0x08,0x67,0x70,0x42,0xf2,0x94,0x84,0xfd,0xbb,0x61,0xe1,0x5a,0x1c,0xde,0x07,0x40,0xac,0x7f,0x79,0x3b,0xba,0x75,0x3c,0xd1,0xef,0xe8,0x8d,0x4c,0x70,0x08,0x31,0x37,0xe0,0x33,0x8e,0x1a,0xc5,0xdf,0xe3,0xcd,0x60,0x12,0xa5,0x5d,0x9d,0xa5,0x86,0x8c,0x25,0xa6,0x99,0x08,0xd6,0x22,0x96,0xd1,0xcd,0x70,0xc0,0xdb,0x39,0x62,0x9a,0x8a,0x7d,0x6c,0x8b,0x8a,0xfe,0x60,0x60,0x12,0x40,0xeb,0xbc,0x47,0x88,0xb3,0x5e,0x9e,0x77,0x87,0x7b,0xd0,0x04,0x09},
+ {0x9c,0x91,0xba,0xdd,0xd4,0x1f,0xce,0xb4,0xaa,0x8d,0x4c,0xc7,0x3e,0xdb,0x31,0xcf,0x51,0xcc,0x86,0xad,0x63,0xcc,0x63,0x2c,0x07,0xde,0x1d,0xbc,0x3f,0x14,0xe2,0x43,0xb9,0x40,0xf9,0x48,0x66,0x2d,0x32,0xf4,0x39,0x0c,0x2d,0xbd,0x0c,0x2f,0x95,0x06,0x31,0xf9,0x81,0xa0,0xad,0x97,0x76,0x16,0x6c,0x2a,0xf7,0xba,0xce,0xaa,0x40,0x62,0xa0,0x95,0xa2,0x5b,0x9c,0x74,0x34,0xf8,0x5a,0xd2,0x37,0xca,0x5b,0x7c,0x94,0xd6,0x6a,0x31,0xc9,0xe7,0xa7,0x3b,0xf1,0x66,0xac,0x0c,0xb4,0x8d,0x23,0xaf,0xbd,0x56},
+ {0xeb,0x33,0x35,0xf5,0xe3,0xb9,0x2a,0x36,0x40,0x3d,0xb9,0x6e,0xd5,0x68,0x85,0x33,0x72,0x55,0x5a,0x1d,0x52,0x14,0x0e,0x9e,0x18,0x13,0x74,0x83,0x6d,0xa8,0x24,0x1d,0xb2,0x3b,0x9d,0xc1,0x6c,0xd3,0x10,0x13,0xb9,0x86,0x23,0x62,0xb7,0x6b,0x2a,0x06,0x5c,0x4f,0xa1,0xd7,0x91,0x85,0x9b,0x7c,0x54,0x57,0x1e,0x7e,0x50,0x31,0xaa,0x03,0x1f,0xce,0xd4,0xff,0x48,0x76,0xec,0xf4,0x1c,0x8c,0xac,0x54,0xf0,0xea,0x45,0xe0,0x7c,0x35,0x09,0x1d,0x82,0x25,0xd2,0x88,0x59,0x48,0xeb,0x9a,0xdc,0x61,0xb2,0x43},
+ {0xbb,0x79,0xbb,0x88,0x19,0x1e,0x5b,0xe5,0x9d,0x35,0x7a,0xc1,0x7d,0xd0,0x9e,0xa0,0x33,0xea,0x3d,0x60,0xe2,0x2e,0x2c,0xb0,0xc2,0x6b,0x27,0x5b,0xcf,0x55,0x60,0x32,0x64,0x13,0x95,0x6c,0x8b,0x3d,0x51,0x19,0x7b,0xf4,0x0b,0x00,0x26,0x71,0xfe,0x94,0x67,0x95,0x4f,0xd5,0xdd,0x10,0x8d,0x02,0x64,0x09,0x94,0x42,0xe2,0xd5,0xb4,0x02,0xf2,0x8d,0xd1,0x28,0xcb,0x55,0xa1,0xb4,0x08,0xe5,0x6c,0x18,0x46,0x46,0xcc,0xea,0x89,0x43,0x82,0x6c,0x93,0xf4,0x9c,0xc4,0x10,0x34,0x5d,0xae,0x09,0xc8,0xa6,0x27},
+ {0x88,0xb1,0x0d,0x1f,0xcd,0xeb,0xa6,0x8b,0xe8,0x5b,0x5a,0x67,0x3a,0xd7,0xd3,0x37,0x5a,0x58,0xf5,0x15,0xa3,0xdf,0x2e,0xf2,0x7e,0xa1,0x60,0xff,0x74,0x71,0xb6,0x2c,0x54,0x69,0x3d,0xc4,0x0a,0x27,0x2c,0xcd,0xb2,0xca,0x66,0x6a,0x57,0x3e,0x4a,0xdd,0x6c,0x03,0xd7,0x69,0x24,0x59,0xfa,0x79,0x99,0x25,0x8c,0x3d,0x60,0x03,0x15,0x22,0xd0,0xe1,0x0b,0x39,0xf9,0xcd,0xee,0x59,0xf1,0xe3,0x8c,0x72,0x44,0x20,0x42,0xa9,0xf4,0xf0,0x94,0x7a,0x66,0x1c,0x89,0x82,0x36,0xf4,0x90,0x38,0xb7,0xf4,0x1d,0x7b},
+ {0x24,0xa2,0xb2,0xb3,0xe0,0xf2,0x92,0xe4,0x60,0x11,0x55,0x2b,0x06,0x9e,0x6c,0x7c,0x0e,0x7b,0x7f,0x0d,0xe2,0x8f,0xeb,0x15,0x92,0x59,0xfc,0x58,0x26,0xef,0xfc,0x61,0x8c,0xf5,0xf8,0x07,0x18,0x22,0x2e,0x5f,0xd4,0x09,0x94,0xd4,0x9f,0x5c,0x55,0xe3,0x30,0xa6,0xb6,0x1f,0x8d,0xa8,0xaa,0xb2,0x3d,0xe0,0x52,0xd3,0x45,0x82,0x69,0x68,0x7a,0x18,0x18,0x2a,0x85,0x5d,0xb1,0xdb,0xd7,0xac,0xdd,0x86,0xd3,0xaa,0xe4,0xf3,0x82,0xc4,0xf6,0x0f,0x81,0xe2,0xba,0x44,0xcf,0x01,0xaf,0x3d,0x47,0x4c,0xcf,0x46},
+ {0xf9,0xe5,0xc4,0x9e,0xed,0x25,0x65,0x42,0x03,0x33,0x90,0x16,0x01,0xda,0x5e,0x0e,0xdc,0xca,0xe5,0xcb,0xf2,0xa7,0xb1,0x72,0x40,0x5f,0xeb,0x14,0xcd,0x7b,0x38,0x29,0x40,0x81,0x49,0xf1,0xa7,0x6e,0x3c,0x21,0x54,0x48,0x2b,0x39,0xf8,0x7e,0x1e,0x7c,0xba,0xce,0x29,0x56,0x8c,0xc3,0x88,0x24,0xbb,0xc5,0x8c,0x0d,0xe5,0xaa,0x65,0x10,0x57,0x0d,0x20,0xdf,0x25,0x45,0x2c,0x1c,0x4a,0x67,0xca,0xbf,0xd6,0x2d,0x3b,0x5c,0x30,0x40,0x83,0xe1,0xb1,0xe7,0x07,0x0a,0x16,0xe7,0x1c,0x4f,0xe6,0x98,0xa1,0x69},
+ {0xbc,0x78,0x1a,0xd9,0xe0,0xb2,0x62,0x90,0x67,0x96,0x50,0xc8,0x9c,0x88,0xc9,0x47,0xb8,0x70,0x50,0x40,0x66,0x4a,0xf5,0x9d,0xbf,0xa1,0x93,0x24,0xa9,0xe6,0x69,0x73,0xed,0xca,0xc5,0xdc,0x34,0x44,0x01,0xe1,0x33,0xfb,0x84,0x3c,0x96,0x5d,0xed,0x47,0xe7,0xa0,0x86,0xed,0x76,0x95,0x01,0x70,0xe4,0xf9,0x67,0xd2,0x7b,0x69,0xb2,0x25,0x64,0x68,0x98,0x13,0xfb,0x3f,0x67,0x9d,0xb8,0xc7,0x5d,0x41,0xd9,0xfb,0xa5,0x3c,0x5e,0x3b,0x27,0xdf,0x3b,0xcc,0x4e,0xe0,0xd2,0x4c,0x4e,0xb5,0x3d,0x68,0x20,0x14},
+ {0x97,0xd1,0x9d,0x24,0x1e,0xbd,0x78,0xb4,0x02,0xc1,0x58,0x5e,0x00,0x35,0x0c,0x62,0x5c,0xac,0xba,0xcc,0x2f,0xd3,0x02,0xfb,0x2d,0xa7,0x08,0xf5,0xeb,0x3b,0xb6,0x60,0xd0,0x5a,0xcc,0xc1,0x6f,0xbb,0xee,0x34,0x8b,0xac,0x46,0x96,0xe9,0x0c,0x1b,0x6a,0x53,0xde,0x6b,0xa6,0x49,0xda,0xb0,0xd3,0xc1,0x81,0xd0,0x61,0x41,0x3b,0xe8,0x31,0x4f,0x2b,0x06,0x9e,0x12,0xc7,0xe8,0x97,0xd8,0x0a,0x32,0x29,0x4f,0x8f,0xe4,0x49,0x3f,0x68,0x18,0x6f,0x4b,0xe1,0xec,0x5b,0x17,0x03,0x55,0x2d,0xb6,0x1e,0xcf,0x55},
+ {0x58,0x3d,0xc2,0x65,0x10,0x10,0x79,0x58,0x9c,0x81,0x94,0x50,0x6d,0x08,0x9d,0x8b,0xa7,0x5f,0xc5,0x12,0xa9,0x2f,0x40,0xe2,0xd4,0x91,0x08,0x57,0x64,0x65,0x9a,0x66,0x52,0x8c,0xf5,0x7d,0xe3,0xb5,0x76,0x30,0x36,0xcc,0x99,0xe7,0xdd,0xb9,0x3a,0xd7,0x20,0xee,0x13,0x49,0xe3,0x1c,0x83,0xbd,0x33,0x01,0xba,0x62,0xaa,0xfb,0x56,0x1a,0xec,0xc9,0x9d,0x5c,0x50,0x6b,0x3e,0x94,0x1a,0x37,0x7c,0xa7,0xbb,0x57,0x25,0x30,0x51,0x76,0x34,0x41,0x56,0xae,0x73,0x98,0x5c,0x8a,0xc5,0x99,0x67,0x83,0xc4,0x13},
+ {0xb9,0xe1,0xb3,0x5a,0x46,0x5d,0x3a,0x42,0x61,0x3f,0xf1,0xc7,0x87,0xc1,0x13,0xfc,0xb6,0xb9,0xb5,0xec,0x64,0x36,0xf8,0x19,0x07,0xb6,0x37,0xa6,0x93,0x0c,0xf8,0x66,0x80,0xd0,0x8b,0x5d,0x6a,0xfb,0xdc,0xc4,0x42,0x48,0x1a,0x57,0xec,0xc4,0xeb,0xde,0x65,0x53,0xe5,0xb8,0x83,0xe8,0xb2,0xd4,0x27,0xb8,0xe5,0xc8,0x7d,0xc8,0xbd,0x50,0x11,0xe1,0xdf,0x6e,0x83,0x37,0x6d,0x60,0xd9,0xab,0x11,0xf0,0x15,0x3e,0x35,0x32,0x96,0x3b,0xb7,0x25,0xc3,0x3a,0xb0,0x64,0xae,0xd5,0x5f,0x72,0x44,0x64,0xd5,0x1d},
+ {0x7d,0x12,0x62,0x33,0xf8,0x7f,0xa4,0x8f,0x15,0x7c,0xcd,0x71,0xc4,0x6a,0x9f,0xbc,0x8b,0x0c,0x22,0x49,0x43,0x45,0x71,0x6e,0x2e,0x73,0x9f,0x21,0x12,0x59,0x64,0x0e,0x9a,0xc8,0xba,0x08,0x00,0xe6,0x97,0xc2,0xe0,0xc3,0xe1,0xea,0x11,0xea,0x4c,0x7d,0x7c,0x97,0xe7,0x9f,0xe1,0x8b,0xe3,0xf3,0xcd,0x05,0xa3,0x63,0x0f,0x45,0x3a,0x3a,0x27,0x46,0x39,0xd8,0x31,0x2f,0x8f,0x07,0x10,0xa5,0x94,0xde,0x83,0x31,0x9d,0x38,0x80,0x6f,0x99,0x17,0x6d,0x6c,0xe3,0xd1,0x7b,0xa8,0xa9,0x93,0x93,0x8d,0x8c,0x31},
+ {0x19,0xfe,0xff,0x2a,0x03,0x5d,0x74,0xf2,0x66,0xdb,0x24,0x7f,0x49,0x3c,0x9f,0x0c,0xef,0x98,0x85,0xba,0xe3,0xd3,0x98,0xbc,0x14,0x53,0x1d,0x9a,0x67,0x7c,0x4c,0x22,0x98,0xd3,0x1d,0xab,0x29,0x9e,0x66,0x5d,0x3b,0x9e,0x2d,0x34,0x58,0x16,0x92,0xfc,0xcd,0x73,0x59,0xf3,0xfd,0x1d,0x85,0x55,0xf6,0x0a,0x95,0x25,0xc3,0x41,0x9a,0x50,0xe9,0x25,0xf9,0xa6,0xdc,0x6e,0xc0,0xbd,0x33,0x1f,0x1b,0x64,0xf4,0xf3,0x3e,0x79,0x89,0x3e,0x83,0x9d,0x80,0x12,0xec,0x82,0x89,0x13,0xa1,0x28,0x23,0xf0,0xbf,0x05},
+ {0x0b,0xe0,0xca,0x23,0x70,0x13,0x32,0x36,0x59,0xcf,0xac,0xd1,0x0a,0xcf,0x4a,0x54,0x88,0x1c,0x1a,0xd2,0x49,0x10,0x74,0x96,0xa7,0x44,0x2a,0xfa,0xc3,0x8c,0x0b,0x78,0xe4,0x12,0xc5,0x0d,0xdd,0xa0,0x81,0x68,0xfe,0xfa,0xa5,0x44,0xc8,0x0d,0xe7,0x4f,0x40,0x52,0x4a,0x8f,0x6b,0x8e,0x74,0x1f,0xea,0xa3,0x01,0xee,0xcd,0x77,0x62,0x57,0x5f,0x30,0x4f,0x23,0xbc,0x8a,0xf3,0x1e,0x08,0xde,0x05,0x14,0xbd,0x7f,0x57,0x9a,0x0d,0x2a,0xe6,0x34,0x14,0xa5,0x82,0x5e,0xa1,0xb7,0x71,0x62,0x72,0x18,0xf4,0x5f},
+ {0x9d,0xdb,0x89,0x17,0x0c,0x08,0x8e,0x39,0xf5,0x78,0xe7,0xf3,0x25,0x20,0x60,0xa7,0x5d,0x03,0xbd,0x06,0x4c,0x89,0x98,0xfa,0xbe,0x66,0xa9,0x25,0xdc,0x03,0x6a,0x10,0x40,0x95,0xb6,0x13,0xe8,0x47,0xdb,0xe5,0xe1,0x10,0x26,0x43,0x3b,0x2a,0x5d,0xf3,0x76,0x12,0x78,0x38,0xe9,0x26,0x1f,0xac,0x69,0xcb,0xa0,0xa0,0x8c,0xdb,0xd4,0x29,0xd0,0x53,0x33,0x33,0xaf,0x0a,0xad,0xd9,0xe5,0x09,0xd3,0xac,0xa5,0x9d,0x66,0x38,0xf0,0xf7,0x88,0xc8,0x8a,0x65,0x57,0x3c,0xfa,0xbe,0x2c,0x05,0x51,0x8a,0xb3,0x4a},
+ {0x93,0xd5,0x68,0x67,0x25,0x2b,0x7c,0xda,0x13,0xca,0x22,0x44,0x57,0xc0,0xc1,0x98,0x1d,0xce,0x0a,0xca,0xd5,0x0b,0xa8,0xf1,0x90,0xa6,0x88,0xc0,0xad,0xd1,0xcd,0x29,0x9c,0xc0,0xdd,0x5f,0xef,0xd1,0xcf,0xd6,0xce,0x5d,0x57,0xf7,0xfd,0x3e,0x2b,0xe8,0xc2,0x34,0x16,0x20,0x5d,0x6b,0xd5,0x25,0x9b,0x2b,0xed,0x04,0xbb,0xc6,0x41,0x30,0x48,0xe1,0x56,0xd9,0xf9,0xf2,0xf2,0x0f,0x2e,0x6b,0x35,0x9f,0x75,0x97,0xe7,0xad,0x5c,0x02,0x6c,0x5f,0xbb,0x98,0x46,0x1a,0x7b,0x9a,0x04,0x14,0x68,0xbd,0x4b,0x10},
+ {0x67,0xed,0xf1,0x68,0x31,0xfd,0xf0,0x51,0xc2,0x3b,0x6f,0xd8,0xcd,0x1d,0x81,0x2c,0xde,0xf2,0xd2,0x04,0x43,0x5c,0xdc,0x44,0x49,0x71,0x2a,0x09,0x57,0xcc,0xe8,0x5b,0x63,0xf1,0x7f,0xd6,0x5f,0x9a,0x5d,0xa9,0x81,0x56,0xc7,0x4c,0x9d,0xe6,0x2b,0xe9,0x57,0xf2,0x20,0xde,0x4c,0x02,0xf8,0xb7,0xf5,0x2d,0x07,0xfb,0x20,0x2a,0x4f,0x20,0x79,0xb0,0xeb,0x30,0x3d,0x3b,0x14,0xc8,0x30,0x2e,0x65,0xbd,0x5a,0x15,0x89,0x75,0x31,0x5c,0x6d,0x8f,0x31,0x3c,0x3c,0x65,0x1f,0x16,0x79,0xc2,0x17,0xfb,0x70,0x25},
+ {0x75,0x15,0xb6,0x2c,0x7f,0x36,0xfa,0x3e,0x6c,0x02,0xd6,0x1c,0x76,0x6f,0xf9,0xf5,0x62,0x25,0xb5,0x65,0x2a,0x14,0xc7,0xe8,0xcd,0x0a,0x03,0x53,0xea,0x65,0xcb,0x3d,0x5a,0x24,0xb8,0x0b,0x55,0xa9,0x2e,0x19,0xd1,0x50,0x90,0x8f,0xa8,0xfb,0xe6,0xc8,0x35,0xc9,0xa4,0x88,0x2d,0xea,0x86,0x79,0x68,0x86,0x01,0xde,0x91,0x5f,0x1c,0x24,0xaa,0x6c,0xde,0x40,0x29,0x17,0xd8,0x28,0x3a,0x73,0xd9,0x22,0xf0,0x2c,0xbf,0x8f,0xd1,0x01,0x5b,0x23,0xdd,0xfc,0xd7,0x16,0xe5,0xf0,0xcd,0x5f,0xdd,0x0e,0x42,0x08},
+ {0x4a,0xfa,0x62,0x83,0xab,0x20,0xff,0xcd,0x6e,0x3e,0x1a,0xe2,0xd4,0x18,0xe1,0x57,0x2b,0xe6,0x39,0xfc,0x17,0x96,0x17,0xe3,0xfd,0x69,0x17,0xbc,0xef,0x53,0x9a,0x0d,0xce,0x10,0xf4,0x04,0x4e,0xc3,0x58,0x03,0x85,0x06,0x6e,0x27,0x5a,0x5b,0x13,0xb6,0x21,0x15,0xb9,0xeb,0xc7,0x70,0x96,0x5d,0x9c,0x88,0xdb,0x21,0xf3,0x54,0xd6,0x04,0xd5,0xb5,0xbd,0xdd,0x16,0xc1,0x7d,0x5e,0x2d,0xdd,0xa5,0x8d,0xb6,0xde,0x54,0x29,0x92,0xa2,0x34,0x33,0x17,0x08,0xb6,0x1c,0xd7,0x1a,0x99,0x18,0x26,0x4f,0x7a,0x4a},
+ {0x95,0x5f,0xb1,0x5f,0x02,0x18,0xa7,0xf4,0x8f,0x1b,0x5c,0x6b,0x34,0x5f,0xf6,0x3d,0x12,0x11,0xe0,0x00,0x85,0xf0,0xfc,0xcd,0x48,0x18,0xd3,0xdd,0x4c,0x0c,0xb5,0x11,0x4b,0x2a,0x37,0xaf,0x91,0xb2,0xc3,0x24,0xf2,0x47,0x81,0x71,0x70,0x82,0xda,0x93,0xf2,0x9e,0x89,0x86,0x64,0x85,0x84,0xdd,0x33,0xee,0xe0,0x23,0x42,0x31,0x96,0x4a,0xd6,0xff,0xa4,0x08,0x44,0x27,0xe8,0xa6,0xd9,0x76,0x15,0x9c,0x7e,0x17,0x8e,0x73,0xf2,0xb3,0x02,0x3d,0xb6,0x48,0x33,0x77,0x51,0xcc,0x6b,0xce,0x4d,0xce,0x4b,0x4f},
+ {0x84,0x25,0x24,0xe2,0x5a,0xce,0x1f,0xa7,0x9e,0x8a,0xf5,0x92,0x56,0x72,0xea,0x26,0xf4,0x3c,0xea,0x1c,0xd7,0x09,0x1a,0xd2,0xe6,0x01,0x1c,0xb7,0x14,0xdd,0xfc,0x73,0x6f,0x0b,0x9d,0xc4,0x6e,0x61,0xe2,0x30,0x17,0x23,0xec,0xca,0x8f,0x71,0x56,0xe4,0xa6,0x4f,0x6b,0xf2,0x9b,0x40,0xeb,0x48,0x37,0x5f,0x59,0x61,0xe5,0xce,0x42,0x30,0x41,0xac,0x9b,0x44,0x79,0x70,0x7e,0x42,0x0a,0x31,0xe2,0xbc,0x6d,0xe3,0x5a,0x85,0x7c,0x1a,0x84,0x5f,0x21,0x76,0xae,0x4c,0xd6,0xe1,0x9c,0x9a,0x0c,0x74,0x9e,0x38},
+ {0xce,0xb9,0xdc,0x34,0xae,0xb3,0xfc,0x64,0xad,0xd0,0x48,0xe3,0x23,0x03,0x50,0x97,0x1b,0x38,0xc6,0x62,0x7d,0xf0,0xb3,0x45,0x88,0x67,0x5a,0x46,0x79,0x53,0x54,0x61,0x28,0xac,0x0e,0x57,0xf6,0x78,0xbd,0xc9,0xe1,0x9c,0x91,0x27,0x32,0x0b,0x5b,0xe5,0xed,0x91,0x9b,0xa1,0xab,0x3e,0xfc,0x65,0x90,0x36,0x26,0xd6,0xe5,0x25,0xc4,0x25,0x6e,0xde,0xd7,0xf1,0xa6,0x06,0x3e,0x3f,0x08,0x23,0x06,0x8e,0x27,0x76,0xf9,0x3e,0x77,0x6c,0x8a,0x4e,0x26,0xf6,0x14,0x8c,0x59,0x47,0x48,0x15,0x89,0xa0,0x39,0x65},
+ {0x73,0xf7,0xd2,0xc3,0x74,0x1f,0xd2,0xe9,0x45,0x68,0xc4,0x25,0x41,0x54,0x50,0xc1,0x33,0x9e,0xb9,0xf9,0xe8,0x5c,0x4e,0x62,0x6c,0x18,0xcd,0xc5,0xaa,0xe4,0xc5,0x11,0x19,0x4a,0xbb,0x14,0xd4,0xdb,0xc4,0xdd,0x8e,0x4f,0x42,0x98,0x3c,0xbc,0xb2,0x19,0x69,0x71,0xca,0x36,0xd7,0x9f,0xa8,0x48,0x90,0xbd,0x19,0xf0,0x0e,0x32,0x65,0x0f,0xc6,0xe0,0xfd,0xca,0xb1,0xd1,0x86,0xd4,0x81,0x51,0x3b,0x16,0xe3,0xe6,0x3f,0x4f,0x9a,0x93,0xf2,0xfa,0x0d,0xaf,0xa8,0x59,0x2a,0x07,0x33,0xec,0xbd,0xc7,0xab,0x4c},
+ {0x2e,0x0a,0x9c,0x08,0x24,0x96,0x9e,0x23,0x38,0x47,0xfe,0x3a,0xc0,0xc4,0x48,0xc7,0x2a,0xa1,0x4f,0x76,0x2a,0xed,0xdb,0x17,0x82,0x85,0x1c,0x32,0xf0,0x93,0x9b,0x63,0x89,0xd2,0x78,0x3f,0x8f,0x78,0x8f,0xc0,0x9f,0x4d,0x40,0xa1,0x2c,0xa7,0x30,0xfe,0x9d,0xcc,0x65,0xcf,0xfc,0x8b,0x77,0xf2,0x21,0x20,0xcb,0x5a,0x16,0x98,0xe4,0x7e,0xc3,0xa1,0x11,0x91,0xe3,0x08,0xd5,0x7b,0x89,0x74,0x90,0x80,0xd4,0x90,0x2b,0x2b,0x19,0xfd,0x72,0xae,0xc2,0xae,0xd2,0xe7,0xa6,0x02,0xb6,0x85,0x3c,0x49,0xdf,0x0e},
+ {0x68,0x5a,0x9b,0x59,0x58,0x81,0xcc,0xae,0x0e,0xe2,0xad,0xeb,0x0f,0x4f,0x57,0xea,0x07,0x7f,0xb6,0x22,0x74,0x1d,0xe4,0x4f,0xb4,0x4f,0x9d,0x01,0xe3,0x92,0x3b,0x40,0x13,0x41,0x76,0x84,0xd2,0xc4,0x67,0x67,0x35,0xf8,0xf5,0xf7,0x3f,0x40,0x90,0xa0,0xde,0xbe,0xe6,0xca,0xfa,0xcf,0x8f,0x1c,0x69,0xa3,0xdf,0xd1,0x54,0x0c,0xc0,0x04,0xf8,0x5c,0x46,0x8b,0x81,0x2f,0xc2,0x4d,0xf8,0xef,0x80,0x14,0x5a,0xf3,0xa0,0x71,0x57,0xd6,0xc7,0x04,0xad,0xbf,0xe8,0xae,0xf4,0x76,0x61,0xb2,0x2a,0xb1,0x5b,0x35},
+ {0xf4,0xbb,0x93,0x74,0xcc,0x64,0x1e,0xa7,0xc3,0xb0,0xa3,0xec,0xd9,0x84,0xbd,0xe5,0x85,0xe7,0x05,0xfa,0x0c,0xc5,0x6b,0x0a,0x12,0xc3,0x2e,0x18,0x32,0x81,0x9b,0x0f,0x18,0x73,0x8c,0x5a,0xc7,0xda,0x01,0xa3,0x11,0xaa,0xce,0xb3,0x9d,0x03,0x90,0xed,0x2d,0x3f,0xae,0x3b,0xbf,0x7c,0x07,0x6f,0x8e,0xad,0x52,0xe0,0xf8,0xea,0x18,0x75,0x32,0x6c,0x7f,0x1b,0xc4,0x59,0x88,0xa4,0x98,0x32,0x38,0xf4,0xbc,0x60,0x2d,0x0f,0xd9,0xd1,0xb1,0xc9,0x29,0xa9,0x15,0x18,0xc4,0x55,0x17,0xbb,0x1b,0x87,0xc3,0x47},
+ {0x48,0x4f,0xec,0x71,0x97,0x53,0x44,0x51,0x6e,0x5d,0x8c,0xc9,0x7d,0xb1,0x05,0xf8,0x6b,0xc6,0xc3,0x47,0x1a,0xc1,0x62,0xf7,0xdc,0x99,0x46,0x76,0x85,0x9b,0xb8,0x00,0xb0,0x66,0x50,0xc8,0x50,0x5d,0xe6,0xfb,0xb0,0x99,0xa2,0xb3,0xb0,0xc4,0xec,0x62,0xe0,0xe8,0x1a,0x44,0xea,0x54,0x37,0xe5,0x5f,0x8d,0xd4,0xe8,0x2c,0xa0,0xfe,0x08,0xd0,0xea,0xde,0x68,0x76,0xdd,0x4d,0x82,0x23,0x5d,0x68,0x4b,0x20,0x45,0x64,0xc8,0x65,0xd6,0x89,0x5d,0xcd,0xcf,0x14,0xb5,0x37,0xd5,0x75,0x4f,0xa7,0x29,0x38,0x47},
+ {0x18,0xc4,0x79,0x46,0x75,0xda,0xd2,0x82,0xf0,0x8d,0x61,0xb2,0xd8,0xd7,0x3b,0xe6,0x0a,0xeb,0x47,0xac,0x24,0xef,0x5e,0x35,0xb4,0xc6,0x33,0x48,0x4c,0x68,0x78,0x20,0xc9,0x02,0x39,0xad,0x3a,0x53,0xd9,0x23,0x8f,0x58,0x03,0xef,0xce,0xdd,0xc2,0x64,0xb4,0x2f,0xe1,0xcf,0x90,0x73,0x25,0x15,0x90,0xd3,0xe4,0x44,0x4d,0x8b,0x66,0x6c,0x0c,0x82,0x78,0x7a,0x21,0xcf,0x48,0x3b,0x97,0x3e,0x27,0x81,0xb2,0x0a,0x6a,0xf7,0x7b,0xed,0x8e,0x8c,0xa7,0x65,0x6c,0xa9,0x3f,0x43,0x8a,0x4f,0x05,0xa6,0x11,0x74},
+ {0x6d,0xc8,0x9d,0xb9,0x32,0x9d,0x65,0x4d,0x15,0xf1,0x3a,0x60,0x75,0xdc,0x4c,0x04,0x88,0xe4,0xc2,0xdc,0x2c,0x71,0x4c,0xb3,0xff,0x34,0x81,0xfb,0x74,0x65,0x13,0x7c,0xb4,0x75,0xb1,0x18,0x3d,0xe5,0x9a,0x57,0x02,0xa1,0x92,0xf3,0x59,0x31,0x71,0x68,0xf5,0x35,0xef,0x1e,0xba,0xec,0x55,0x84,0x8f,0x39,0x8c,0x45,0x72,0xa8,0xc9,0x1e,0x9b,0x50,0xa2,0x00,0xd4,0xa4,0xe6,0xb8,0xb4,0x82,0xc8,0x0b,0x02,0xd7,0x81,0x9b,0x61,0x75,0x95,0xf1,0x9b,0xcc,0xe7,0x57,0x60,0x64,0xcd,0xc7,0xa5,0x88,0xdd,0x3a},
+ {0xf2,0xdc,0x35,0xb6,0x70,0x57,0x89,0xab,0xbc,0x1f,0x6c,0xf6,0x6c,0xef,0xdf,0x02,0x87,0xd1,0xb6,0xbe,0x68,0x02,0x53,0x85,0x74,0x9e,0x87,0xcc,0xfc,0x29,0x99,0x24,0x46,0x30,0x39,0x59,0xd4,0x98,0xc2,0x85,0xec,0x59,0xf6,0x5f,0x98,0x35,0x7e,0x8f,0x3a,0x6e,0xf6,0xf2,0x2a,0xa2,0x2c,0x1d,0x20,0xa7,0x06,0xa4,0x31,0x11,0xba,0x61,0x29,0x90,0x95,0x16,0xf1,0xa0,0xd0,0xa3,0x89,0xbd,0x7e,0xba,0x6c,0x6b,0x3b,0x02,0x07,0x33,0x78,0x26,0x3e,0x5a,0xf1,0x7b,0xe7,0xec,0xd8,0xbb,0x0c,0x31,0x20,0x56},
+ {0x43,0xd6,0x34,0x49,0x43,0x93,0x89,0x52,0xf5,0x22,0x12,0xa5,0x06,0xf8,0xdb,0xb9,0x22,0x1c,0xf4,0xc3,0x8f,0x87,0x6d,0x8f,0x30,0x97,0x9d,0x4d,0x2a,0x6a,0x67,0x37,0xd6,0x85,0xe2,0x77,0xf4,0xb5,0x46,0x66,0x93,0x61,0x8f,0x6c,0x67,0xff,0xe8,0x40,0xdd,0x94,0xb5,0xab,0x11,0x73,0xec,0xa6,0x4d,0xec,0x8c,0x65,0xf3,0x46,0xc8,0x7e,0xc7,0x2e,0xa2,0x1d,0x3f,0x8f,0x5e,0x9b,0x13,0xcd,0x01,0x6c,0x77,0x1d,0x0f,0x13,0xb8,0x9f,0x98,0xa2,0xcf,0x8f,0x4c,0x21,0xd5,0x9d,0x9b,0x39,0x23,0xf7,0xaa,0x6d},
+ {0x47,0xbe,0x3d,0xeb,0x62,0x75,0x3a,0x5f,0xb8,0xa0,0xbd,0x8e,0x54,0x38,0xea,0xf7,0x99,0x72,0x74,0x45,0x31,0xe5,0xc3,0x00,0x51,0xd5,0x27,0x16,0xe7,0xe9,0x04,0x13,0xa2,0x8e,0xad,0xac,0xbf,0x04,0x3b,0x58,0x84,0xe8,0x8b,0x14,0xe8,0x43,0xb7,0x29,0xdb,0xc5,0x10,0x08,0x3b,0x58,0x1e,0x2b,0xaa,0xbb,0xb3,0x8e,0xe5,0x49,0x54,0x2b,0xfe,0x9c,0xdc,0x6a,0xd2,0x14,0x98,0x78,0x0b,0xdd,0x48,0x8b,0x3f,0xab,0x1b,0x3c,0x0a,0xc6,0x79,0xf9,0xff,0xe1,0x0f,0xda,0x93,0xd6,0x2d,0x7c,0x2d,0xde,0x68,0x44},
+ {0x9e,0x46,0x19,0x94,0x5e,0x35,0xbb,0x51,0x54,0xc7,0xdd,0x23,0x4c,0xdc,0xe6,0x33,0x62,0x99,0x7f,0x44,0xd6,0xb6,0xa5,0x93,0x63,0xbd,0x44,0xfb,0x6f,0x7c,0xce,0x6c,0xce,0x07,0x63,0xf8,0xc6,0xd8,0x9a,0x4b,0x28,0x0c,0x5d,0x43,0x31,0x35,0x11,0x21,0x2c,0x77,0x7a,0x65,0xc5,0x66,0xa8,0xd4,0x52,0x73,0x24,0x63,0x7e,0x42,0xa6,0x5d,0xca,0x22,0xac,0xde,0x88,0xc6,0x94,0x1a,0xf8,0x1f,0xae,0xbb,0xf7,0x6e,0x06,0xb9,0x0f,0x58,0x59,0x8d,0x38,0x8c,0xad,0x88,0xa8,0x2c,0x9f,0xe7,0xbf,0x9a,0xf2,0x58},
+ {0x68,0x3e,0xe7,0x8d,0xab,0xcf,0x0e,0xe9,0xa5,0x76,0x7e,0x37,0x9f,0x6f,0x03,0x54,0x82,0x59,0x01,0xbe,0x0b,0x5b,0x49,0xf0,0x36,0x1e,0xf4,0xa7,0xc4,0x29,0x76,0x57,0xf6,0xcd,0x0e,0x71,0xbf,0x64,0x5a,0x4b,0x3c,0x29,0x2c,0x46,0x38,0xe5,0x4c,0xb1,0xb9,0x3a,0x0b,0xd5,0x56,0xd0,0x43,0x36,0x70,0x48,0x5b,0x18,0x24,0x37,0xf9,0x6a,0x88,0xa8,0xc6,0x09,0x45,0x02,0x20,0x32,0x73,0x89,0x55,0x4b,0x13,0x36,0xe0,0xd2,0x9f,0x28,0x33,0x3c,0x23,0x36,0xe2,0x83,0x8f,0xc1,0xae,0x0c,0xbb,0x25,0x1f,0x70},
+ {0xed,0x6c,0x61,0xe4,0xf8,0xb0,0xa8,0xc3,0x7d,0xa8,0x25,0x9e,0x0e,0x66,0x00,0xf7,0x9c,0xa5,0xbc,0xf4,0x1f,0x06,0xe3,0x61,0xe9,0x0b,0xc4,0xbd,0xbf,0x92,0x0c,0x2e,0x13,0xc1,0xbe,0x7c,0xd9,0xf6,0x18,0x9d,0xe4,0xdb,0xbf,0x74,0xe6,0x06,0x4a,0x84,0xd6,0x60,0x4e,0xac,0x22,0xb5,0xf5,0x20,0x51,0x5e,0x95,0x50,0xc0,0x5b,0x0a,0x72,0x35,0x5a,0x80,0x9b,0x43,0x09,0x3f,0x0c,0xfc,0xab,0x42,0x62,0x37,0x8b,0x4e,0xe8,0x46,0x93,0x22,0x5c,0xf3,0x17,0x14,0x69,0xec,0xf0,0x4e,0x14,0xbb,0x9c,0x9b,0x0e},
+ {0xad,0x20,0x57,0xfb,0x8f,0xd4,0xba,0xfb,0x0e,0x0d,0xf9,0xdb,0x6b,0x91,0x81,0xee,0xbf,0x43,0x55,0x63,0x52,0x31,0x81,0xd4,0xd8,0x7b,0x33,0x3f,0xeb,0x04,0x11,0x22,0xee,0xbe,0xb1,0x5d,0xd5,0x9b,0xee,0x8d,0xb9,0x3f,0x72,0x0a,0x37,0xab,0xc3,0xc9,0x91,0xd7,0x68,0x1c,0xbf,0xf1,0xa8,0x44,0xde,0x3c,0xfd,0x1c,0x19,0x44,0x6d,0x36,0x14,0x8c,0xbc,0xf2,0x43,0x17,0x3c,0x9e,0x3b,0x6c,0x85,0xb5,0xfc,0x26,0xda,0x2e,0x97,0xfb,0xa7,0x68,0x0e,0x2f,0xb8,0xcc,0x44,0x32,0x59,0xbc,0xe6,0xa4,0x67,0x41},
+ {0x00,0x27,0xf6,0x76,0x28,0x9d,0x3b,0x64,0xeb,0x68,0x76,0x0e,0x40,0x9d,0x1d,0x5d,0x84,0x06,0xfc,0x21,0x03,0x43,0x4b,0x1b,0x6a,0x24,0x55,0x22,0x7e,0xbb,0x38,0x79,0xee,0x8f,0xce,0xf8,0x65,0x26,0xbe,0xc2,0x2c,0xd6,0x80,0xe8,0x14,0xff,0x67,0xe9,0xee,0x4e,0x36,0x2f,0x7e,0x6e,0x2e,0xf1,0xf6,0xd2,0x7e,0xcb,0x70,0x33,0xb3,0x34,0xcc,0xd6,0x81,0x86,0xee,0x91,0xc5,0xcd,0x53,0xa7,0x85,0xed,0x9c,0x10,0x02,0xce,0x83,0x88,0x80,0x58,0xc1,0x85,0x74,0xed,0xe4,0x65,0xfe,0x2d,0x6e,0xfc,0x76,0x11},
+ {0x9b,0x61,0x9c,0x5b,0xd0,0x6c,0xaf,0xb4,0x80,0x84,0xa5,0xb2,0xf4,0xc9,0xdf,0x2d,0xc4,0x4d,0xe9,0xeb,0x02,0xa5,0x4f,0x3d,0x34,0x5f,0x7d,0x67,0x4c,0x3a,0xfc,0x08,0xb8,0x0e,0x77,0x49,0x89,0xe2,0x90,0xdb,0xa3,0x40,0xf4,0xac,0x2a,0xcc,0xfb,0x98,0x9b,0x87,0xd7,0xde,0xfe,0x4f,0x35,0x21,0xb6,0x06,0x69,0xf2,0x54,0x3e,0x6a,0x1f,0xea,0x34,0x07,0xd3,0x99,0xc1,0xa4,0x60,0xd6,0x5c,0x16,0x31,0xb6,0x85,0xc0,0x40,0x95,0x82,0x59,0xf7,0x23,0x3e,0x33,0xe2,0xd1,0x00,0xb9,0x16,0x01,0xad,0x2f,0x4f},
+ {0x54,0x4e,0xae,0x94,0x41,0xb2,0xbe,0x44,0x6c,0xef,0x57,0x18,0x51,0x1c,0x54,0x5f,0x98,0x04,0x8d,0x36,0x2d,0x6b,0x1e,0xa6,0xab,0xf7,0x2e,0x97,0xa4,0x84,0x54,0x44,0x38,0xb6,0x3b,0xb7,0x1d,0xd9,0x2c,0x96,0x08,0x9c,0x12,0xfc,0xaa,0x77,0x05,0xe6,0x89,0x16,0xb6,0xf3,0x39,0x9b,0x61,0x6f,0x81,0xee,0x44,0x29,0x5f,0x99,0x51,0x34,0x7c,0x7d,0xea,0x9f,0xd0,0xfc,0x52,0x91,0xf6,0x5c,0x93,0xb0,0x94,0x6c,0x81,0x4a,0x40,0x5c,0x28,0x47,0xaa,0x9a,0x8e,0x25,0xb7,0x93,0x28,0x04,0xa6,0x9c,0xb8,0x10},
+ {0x9c,0x28,0x18,0x97,0x49,0x47,0x59,0x3d,0x26,0x3f,0x53,0x24,0xc5,0xf8,0xeb,0x12,0x15,0xef,0xc3,0x14,0xcb,0xbf,0x62,0x02,0x8e,0x51,0xb7,0x77,0xd5,0x78,0xb8,0x20,0x6e,0xf0,0x45,0x5a,0xbe,0x41,0x39,0x75,0x65,0x5f,0x9c,0x6d,0xed,0xae,0x7c,0xd0,0xb6,0x51,0xff,0x72,0x9c,0x6b,0x77,0x11,0xa9,0x4d,0x0d,0xef,0xd9,0xd1,0xd2,0x17,0x6a,0x3e,0x3f,0x07,0x18,0xaf,0xf2,0x27,0x69,0x10,0x52,0xd7,0x19,0xe5,0x3f,0xfd,0x22,0x00,0xa6,0x3c,0x2c,0xb7,0xe3,0x22,0xa7,0xc6,0x65,0xcc,0x63,0x4f,0x21,0x72},
+ {0x93,0xa6,0x07,0x53,0x40,0x7f,0xe3,0xb4,0x95,0x67,0x33,0x2f,0xd7,0x14,0xa7,0xab,0x99,0x10,0x76,0x73,0xa7,0xd0,0xfb,0xd6,0xc9,0xcb,0x71,0x81,0xc5,0x48,0xdf,0x5f,0xc9,0x29,0x3b,0xf4,0xb9,0xb7,0x9d,0x1d,0x75,0x8f,0x51,0x4f,0x4a,0x82,0x05,0xd6,0xc4,0x9d,0x2f,0x31,0xbd,0x72,0xc0,0xf2,0xb0,0x45,0x15,0x5a,0x85,0xac,0x24,0x1f,0xaa,0x05,0x95,0x8e,0x32,0x08,0xd6,0x24,0xee,0x20,0x14,0x0c,0xd1,0xc1,0x48,0x47,0xa2,0x25,0xfb,0x06,0x5c,0xe4,0xff,0xc7,0xe6,0x95,0xe3,0x2a,0x9e,0x73,0xba,0x00},
+ {0xd6,0x90,0x87,0x5c,0xde,0x98,0x2e,0x59,0xdf,0xa2,0xc2,0x45,0xd3,0xb7,0xbf,0xe5,0x22,0x99,0xb4,0xf9,0x60,0x3b,0x5a,0x11,0xf3,0x78,0xad,0x67,0x3e,0x3a,0x28,0x03,0x26,0xbb,0x88,0xea,0xf5,0x26,0x44,0xae,0xfb,0x3b,0x97,0x84,0xd9,0x79,0x06,0x36,0x50,0x4e,0x69,0x26,0x0c,0x03,0x9f,0x5c,0x26,0xd2,0x18,0xd5,0xe7,0x7d,0x29,0x72,0x39,0xb9,0x0c,0xbe,0xc7,0x1d,0x24,0x48,0x80,0x30,0x63,0x8b,0x4d,0x9b,0xf1,0x32,0x08,0x93,0x28,0x02,0x0d,0xc9,0xdf,0xd3,0x45,0x19,0x27,0x46,0x68,0x29,0xe1,0x05},
+ {0x5a,0x49,0x9c,0x2d,0xb3,0xee,0x82,0xba,0x7c,0xb9,0x2b,0xf1,0xfc,0xc8,0xef,0xce,0xe0,0xd1,0xb5,0x93,0xae,0xab,0x2d,0xb0,0x9b,0x8d,0x69,0x13,0x9c,0x0c,0xc0,0x39,0x50,0x45,0x2c,0x24,0xc8,0xbb,0xbf,0xad,0xd9,0x81,0x30,0xd0,0xec,0x0c,0xc8,0xbc,0x92,0xdf,0xc8,0xf5,0xa6,0x66,0x35,0x84,0x4c,0xce,0x58,0x82,0xd3,0x25,0xcf,0x78,0x68,0x9d,0x48,0x31,0x8e,0x6b,0xae,0x15,0x87,0xf0,0x2b,0x9c,0xab,0x1c,0x85,0xaa,0x05,0xfa,0x4e,0xf0,0x97,0x5a,0xa7,0xc9,0x32,0xf8,0x3f,0x6b,0x07,0x52,0x6b,0x00},
+ {0x1c,0x78,0x95,0x9d,0xe1,0xcf,0xe0,0x29,0xe2,0x10,0x63,0x96,0x18,0xdf,0x81,0xb6,0x39,0x6b,0x51,0x70,0xd3,0x39,0xdf,0x57,0x22,0x61,0xc7,0x3b,0x44,0xe3,0x57,0x4d,0x2d,0x08,0xce,0xb9,0x16,0x7e,0xcb,0xf5,0x29,0xbc,0x7a,0x41,0x4c,0xf1,0x07,0x34,0xab,0xa7,0xf4,0x2b,0xce,0x6b,0xb3,0xd4,0xce,0x75,0x9f,0x1a,0x56,0xe9,0xe2,0x7d,0xcb,0x5e,0xa5,0xb6,0xf4,0xd4,0x70,0xde,0x99,0xdb,0x85,0x5d,0x7f,0x52,0x01,0x48,0x81,0x9a,0xee,0xd3,0x40,0xc4,0xc9,0xdb,0xed,0x29,0x60,0x1a,0xaf,0x90,0x2a,0x6b},
+ {0x97,0x1e,0xe6,0x9a,0xfc,0xf4,0x23,0x69,0xd1,0x5f,0x3f,0xe0,0x1d,0x28,0x35,0x57,0x2d,0xd1,0xed,0xe6,0x43,0xae,0x64,0xa7,0x4a,0x3e,0x2d,0xd1,0xe9,0xf4,0xd8,0x5f,0x0a,0xd8,0xb2,0x5b,0x24,0xf3,0xeb,0x77,0x9b,0x07,0xb9,0x2f,0x47,0x1b,0x30,0xd8,0x33,0x73,0xee,0x4c,0xf2,0xe6,0x47,0xc6,0x09,0x21,0x6c,0x27,0xc8,0x12,0x58,0x46,0xd9,0x62,0x10,0x2a,0xb2,0xbe,0x43,0x4d,0x16,0xdc,0x31,0x38,0x75,0xfb,0x65,0x70,0xd7,0x68,0x29,0xde,0x7b,0x4a,0x0d,0x18,0x90,0x67,0xb1,0x1c,0x2b,0x2c,0xb3,0x05},
+ {0xfd,0xa8,0x4d,0xd2,0xcc,0x5e,0xc0,0xc8,0x83,0xef,0xdf,0x05,0xac,0x1a,0xcf,0xa1,0x61,0xcd,0xf9,0x7d,0xf2,0xef,0xbe,0xdb,0x99,0x1e,0x47,0x7b,0xa3,0x56,0x55,0x3b,0x95,0x81,0xd5,0x7a,0x2c,0xa4,0xfc,0xf7,0xcc,0xf3,0x33,0x43,0x6e,0x28,0x14,0x32,0x9d,0x97,0x0b,0x34,0x0d,0x9d,0xc2,0xb6,0xe1,0x07,0x73,0x56,0x48,0x1a,0x77,0x31,0x82,0xd4,0x4d,0xe1,0x24,0xc5,0xb0,0x32,0xb6,0xa4,0x2b,0x1a,0x54,0x51,0xb3,0xed,0xf3,0x5a,0x2b,0x28,0x48,0x60,0xd1,0xa3,0xeb,0x36,0x73,0x7a,0xd2,0x79,0xc0,0x4f},
+ {0x7f,0x2f,0xbf,0x89,0xb0,0x38,0xc9,0x51,0xa7,0xe9,0xdf,0x02,0x65,0xbd,0x97,0x24,0x53,0xe4,0x80,0x78,0x9c,0xc0,0xff,0xff,0x92,0x8e,0xf9,0xca,0xce,0x67,0x45,0x12,0x0d,0xc5,0x86,0x0c,0x44,0x8b,0x34,0xdc,0x51,0xe6,0x94,0xcc,0xc9,0xcb,0x37,0x13,0xb9,0x3c,0x3e,0x64,0x4d,0xf7,0x22,0x64,0x08,0xcd,0xe3,0xba,0xc2,0x70,0x11,0x24,0xb4,0x73,0xc4,0x0a,0x86,0xab,0xf9,0x3f,0x35,0xe4,0x13,0x01,0xee,0x1d,0x91,0xf0,0xaf,0xc4,0xc6,0xeb,0x60,0x50,0xe7,0x4a,0x0d,0x00,0x87,0x6c,0x96,0x12,0x86,0x3f},
+ {0xde,0x0d,0x2a,0x78,0xc9,0x0c,0x9a,0x55,0x85,0x83,0x71,0xea,0xb2,0xcd,0x1d,0x55,0x8c,0x23,0xef,0x31,0x5b,0x86,0x62,0x7f,0x3d,0x61,0x73,0x79,0x76,0xa7,0x4a,0x50,0x13,0x8d,0x04,0x36,0xfa,0xfc,0x18,0x9c,0xdd,0x9d,0x89,0x73,0xb3,0x9d,0x15,0x29,0xaa,0xd0,0x92,0x9f,0x0b,0x35,0x9f,0xdc,0xd4,0x19,0x8a,0x87,0xee,0x7e,0xf5,0x26,0xb1,0xef,0x87,0x56,0xd5,0x2c,0xab,0x0c,0x7b,0xf1,0x7a,0x24,0x62,0xd1,0x80,0x51,0x67,0x24,0x5a,0x4f,0x34,0x5a,0xc1,0x85,0x69,0x30,0xba,0x9d,0x3d,0x94,0x41,0x40},
+ {0x96,0xcc,0xeb,0x43,0xba,0xee,0xc0,0xc3,0xaf,0x9c,0xea,0x26,0x9c,0x9c,0x74,0x8d,0xc6,0xcc,0x77,0x1c,0xee,0x95,0xfa,0xd9,0x0f,0x34,0x84,0x76,0xd9,0xa1,0x20,0x14,0xdd,0xaa,0x6c,0xa2,0x43,0x77,0x21,0x4b,0xce,0xb7,0x8a,0x64,0x24,0xb4,0xa6,0x47,0xe3,0xc9,0xfb,0x03,0x7a,0x4f,0x1d,0xcb,0x19,0xd0,0x00,0x98,0x42,0x31,0xd9,0x12,0x4f,0x59,0x37,0xd3,0x99,0x77,0xc6,0x00,0x7b,0xa4,0x3a,0xb2,0x40,0x51,0x3c,0x5e,0x95,0xf3,0x5f,0xe3,0x54,0x28,0x18,0x44,0x12,0xa0,0x59,0x43,0x31,0x92,0x4f,0x1b},
+ {0x51,0x09,0x15,0x89,0x9d,0x10,0x5c,0x3e,0x6a,0x69,0xe9,0x2d,0x91,0xfa,0xce,0x39,0x20,0x30,0x5f,0x97,0x3f,0xe4,0xea,0x20,0xae,0x2d,0x13,0x7f,0x2a,0x57,0x9b,0x23,0xb1,0x66,0x98,0xa4,0x30,0x30,0xcf,0x33,0x59,0x48,0x5f,0x21,0xd2,0x73,0x1f,0x25,0xf6,0xf4,0xde,0x51,0x40,0xaa,0x82,0xab,0xf6,0x23,0x9a,0x6f,0xd5,0x91,0xf1,0x5f,0x68,0x90,0x2d,0xac,0x33,0xd4,0x9e,0x81,0x23,0x85,0xc9,0x5f,0x79,0xab,0x83,0x28,0x3d,0xeb,0x93,0x55,0x80,0x72,0x45,0xef,0xcb,0x36,0x8f,0x75,0x6a,0x52,0x0c,0x02},
+ {0xbc,0xdb,0xd8,0x9e,0xf8,0x34,0x98,0x77,0x6c,0xa4,0x7c,0xdc,0xf9,0xaa,0xf2,0xc8,0x74,0xb0,0xe1,0xa3,0xdc,0x4c,0x52,0xa9,0x77,0x38,0x31,0x15,0x46,0xcc,0xaa,0x02,0x89,0xcc,0x42,0xf0,0x59,0xef,0x31,0xe9,0xb6,0x4b,0x12,0x8e,0x9d,0x9c,0x58,0x2c,0x97,0x59,0xc7,0xae,0x8a,0xe1,0xc8,0xad,0x0c,0xc5,0x02,0x56,0x0a,0xfe,0x2c,0x45,0xdf,0x77,0x78,0x64,0xa0,0xf7,0xa0,0x86,0x9f,0x7c,0x60,0x0e,0x27,0x64,0xc4,0xbb,0xc9,0x11,0xfb,0xf1,0x25,0xea,0x17,0xab,0x7b,0x87,0x4b,0x30,0x7b,0x7d,0xfb,0x4c},
+ {0xfe,0x75,0x9b,0xb8,0x6c,0x3d,0xb4,0x72,0x80,0xdc,0x6a,0x9c,0xd9,0x94,0xc6,0x54,0x9f,0x4c,0xe3,0x3e,0x37,0xaa,0xc3,0xb8,0x64,0x53,0x07,0x39,0x2b,0x62,0xb4,0x14,0x12,0xef,0x89,0x97,0xc2,0x99,0x86,0xe2,0x0d,0x19,0x57,0xdf,0x71,0xcd,0x6e,0x2b,0xd0,0x70,0xc9,0xec,0x57,0xc8,0x43,0xc3,0xc5,0x3a,0x4d,0x43,0xbc,0x4c,0x1d,0x5b,0x26,0x9f,0x0a,0xcc,0x15,0x26,0xfb,0xb6,0xe5,0xcc,0x8d,0xb8,0x2b,0x0e,0x4f,0x3a,0x05,0xa7,0x69,0x33,0x8b,0x49,0x01,0x13,0xd1,0x2d,0x59,0x58,0x12,0xf7,0x98,0x2f},
+ {0x56,0x9e,0x0f,0xb5,0x4c,0xa7,0x94,0x0c,0x20,0x13,0x8e,0x8e,0xa9,0xf4,0x1f,0x5b,0x67,0x0f,0x30,0x82,0x21,0xcc,0x2a,0x9a,0xf9,0xaa,0x06,0xd8,0x49,0xe2,0x6a,0x3a,0x01,0xa7,0x54,0x4f,0x44,0xae,0x12,0x2e,0xde,0xd7,0xcb,0xa9,0xf0,0x3e,0xfe,0xfc,0xe0,0x5d,0x83,0x75,0x0d,0x89,0xbf,0xce,0x54,0x45,0x61,0xe7,0xe9,0x62,0x80,0x1d,0x5a,0x7c,0x90,0xa9,0x85,0xda,0x7a,0x65,0x62,0x0f,0xb9,0x91,0xb5,0xa8,0x0e,0x1a,0xe9,0xb4,0x34,0xdf,0xfb,0x1d,0x0e,0x8d,0xf3,0x5f,0xf2,0xae,0xe8,0x8c,0x8b,0x29},
+ {0xb2,0x0c,0xf7,0xef,0x53,0x79,0x92,0x2a,0x76,0x70,0x15,0x79,0x2a,0xc9,0x89,0x4b,0x6a,0xcf,0xa7,0x30,0x7a,0x45,0x18,0x94,0x85,0xe4,0x5c,0x4d,0x40,0xa8,0xb8,0x34,0xde,0x65,0x21,0x0a,0xea,0x72,0x7a,0x83,0xf6,0x79,0xcf,0x0b,0xb4,0x07,0xab,0x3f,0x70,0xae,0x38,0x77,0xc7,0x36,0x16,0x52,0xdc,0xd7,0xa7,0x03,0x18,0x27,0xa6,0x6b,0x35,0x33,0x69,0x83,0xb5,0xec,0x6e,0xc2,0xfd,0xfe,0xb5,0x63,0xdf,0x13,0xa8,0xd5,0x73,0x25,0xb2,0xa4,0x9a,0xaa,0x93,0xa2,0x6a,0x1c,0x5e,0x46,0xdd,0x2b,0xd6,0x71},
+ {0x80,0xdf,0x78,0xd3,0x28,0xcc,0x33,0x65,0xb4,0xa4,0x0f,0x0a,0x79,0x43,0xdb,0xf6,0x5a,0xda,0x01,0xf7,0xf9,0x5f,0x64,0xe3,0xa4,0x2b,0x17,0xf3,0x17,0xf3,0xd5,0x74,0xf5,0x5e,0xf7,0xb1,0xda,0xb5,0x2d,0xcd,0xf5,0x65,0xb0,0x16,0xcf,0x95,0x7f,0xd7,0x85,0xf0,0x49,0x3f,0xea,0x1f,0x57,0x14,0x3d,0x2b,0x2b,0x26,0x21,0x36,0x33,0x1c,0x81,0xca,0xd9,0x67,0x54,0xe5,0x6f,0xa8,0x37,0x8c,0x29,0x2b,0x75,0x7c,0x8b,0x39,0x3b,0x62,0xac,0xe3,0x92,0x08,0x6d,0xda,0x8c,0xd9,0xe9,0x47,0x45,0xcc,0xeb,0x4a},
+ {0xc9,0x01,0x6d,0x27,0x1b,0x07,0xf0,0x12,0x70,0x8c,0xc4,0x86,0xc5,0xba,0xb8,0xe7,0xa9,0xfb,0xd6,0x71,0x9b,0x12,0x08,0x53,0x92,0xb7,0x3d,0x5a,0xf9,0xfb,0x88,0x5d,0x10,0xb6,0x54,0x73,0x9e,0x8d,0x40,0x0b,0x6e,0x5b,0xa8,0x5b,0x53,0x32,0x6b,0x80,0x07,0xa2,0x58,0x4a,0x03,0x3a,0xe6,0xdb,0x2c,0xdf,0xa1,0xc9,0xdd,0xd9,0x3b,0x17,0xdf,0x72,0x58,0xfe,0x1e,0x0f,0x50,0x2b,0xc1,0x18,0x39,0xd4,0x2e,0x58,0xd6,0x58,0xe0,0x3a,0x67,0xc9,0x8e,0x27,0xed,0xe6,0x19,0xa3,0x9e,0xb1,0x13,0xcd,0xe1,0x06},
+ {0x23,0x6f,0x16,0x6f,0x51,0xad,0xd0,0x40,0xbe,0x6a,0xab,0x1f,0x93,0x32,0x8e,0x11,0x8e,0x08,0x4d,0xa0,0x14,0x5e,0xe3,0x3f,0x66,0x62,0xe1,0x26,0x35,0x60,0x80,0x30,0x53,0x03,0x5b,0x9e,0x62,0xaf,0x2b,0x47,0x47,0x04,0x8d,0x27,0x90,0x0b,0xaa,0x3b,0x27,0xbf,0x43,0x96,0x46,0x5f,0x78,0x0c,0x13,0x7b,0x83,0x8d,0x1a,0x6a,0x3a,0x7f,0x0b,0x80,0x3d,0x5d,0x39,0x44,0xe6,0xf7,0xf6,0xed,0x01,0xc9,0x55,0xd5,0xa8,0x95,0x39,0x63,0x2c,0x59,0x30,0x78,0xcd,0x68,0x7e,0x30,0x51,0x2e,0xed,0xfd,0xd0,0x30},
+ {0xb3,0x33,0x12,0xf2,0x1a,0x4d,0x59,0xe0,0x9c,0x4d,0xcc,0xf0,0x8e,0xe7,0xdb,0x1b,0x77,0x9a,0x49,0x8f,0x7f,0x18,0x65,0x69,0x68,0x98,0x09,0x2c,0x20,0x14,0x92,0x0a,0x50,0x47,0xb8,0x68,0x1e,0x97,0xb4,0x9c,0xcf,0xbb,0x64,0x66,0x29,0x72,0x95,0xa0,0x2b,0x41,0xfa,0x72,0x26,0xe7,0x8d,0x5c,0xd9,0x89,0xc5,0x51,0x43,0x08,0x15,0x46,0x2e,0xa0,0xb9,0xae,0xc0,0x19,0x90,0xbc,0xae,0x4c,0x03,0x16,0x0d,0x11,0xc7,0x55,0xec,0x32,0x99,0x65,0x01,0xf5,0x6d,0x0e,0xfe,0x5d,0xca,0x95,0x28,0x0d,0xca,0x3b},
+ {0xa4,0x62,0x5d,0x3c,0xbc,0x31,0xf0,0x40,0x60,0x7a,0xf0,0xcf,0x3e,0x8b,0xfc,0x19,0x45,0xb5,0x0f,0x13,0xa2,0x3d,0x18,0x98,0xcd,0x13,0x8f,0xae,0xdd,0xde,0x31,0x56,0xbf,0x01,0xcc,0x9e,0xb6,0x8e,0x68,0x9c,0x6f,0x89,0x44,0xa6,0xad,0x83,0xbc,0xf0,0xe2,0x9f,0x7a,0x5f,0x5f,0x95,0x2d,0xca,0x41,0x82,0xf2,0x8d,0x03,0xb4,0xa8,0x4e,0x02,0xd2,0xca,0xf1,0x0a,0x46,0xed,0x2a,0x83,0xee,0x8c,0xa4,0x05,0x53,0x30,0x46,0x5f,0x1a,0xf1,0x49,0x45,0x77,0x21,0x91,0x63,0xa4,0x2c,0x54,0x30,0x09,0xce,0x24},
+ {0x06,0xc1,0x06,0xfd,0xf5,0x90,0xe8,0x1f,0xf2,0x10,0x88,0x5d,0x35,0x68,0xc4,0xb5,0x3e,0xaf,0x8c,0x6e,0xfe,0x08,0x78,0x82,0x4b,0xd7,0x06,0x8a,0xc2,0xe3,0xd4,0x41,0x85,0x0b,0xf3,0xfd,0x55,0xa1,0xcf,0x3f,0xa4,0x2e,0x37,0x36,0x8e,0x16,0xf7,0xd2,0x44,0xf8,0x92,0x64,0xde,0x64,0xe0,0xb2,0x80,0x42,0x4f,0x32,0xa7,0x28,0x99,0x54,0x2e,0x1a,0xee,0x63,0xa7,0x32,0x6e,0xf2,0xea,0xfd,0x5f,0xd2,0xb7,0xe4,0x91,0xae,0x69,0x4d,0x7f,0xd1,0x3b,0xd3,0x3b,0xbc,0x6a,0xff,0xdc,0xc0,0xde,0x66,0x1b,0x49},
+ {0xa7,0x32,0xea,0xc7,0x3d,0xb1,0xf5,0x98,0x98,0xdb,0x16,0x7e,0xcc,0xf8,0xd5,0xe3,0x47,0xd9,0xf8,0xcb,0x52,0xbf,0x0a,0xac,0xac,0xe4,0x5e,0xc8,0xd0,0x38,0xf3,0x08,0xa1,0x64,0xda,0xd0,0x8e,0x4a,0xf0,0x75,0x4b,0x28,0xe2,0x67,0xaf,0x2c,0x22,0xed,0xa4,0x7b,0x7b,0x1f,0x79,0xa3,0x34,0x82,0x67,0x8b,0x01,0xb7,0xb0,0xb8,0xf6,0x4c,0xbd,0x73,0x1a,0x99,0x21,0xa8,0x83,0xc3,0x7a,0x0c,0x32,0xdf,0x01,0xbc,0x27,0xab,0x63,0x70,0x77,0x84,0x1b,0x33,0x3d,0xc1,0x99,0x8a,0x07,0xeb,0x82,0x4a,0x0d,0x53},
+ {0x25,0x48,0xf9,0xe1,0x30,0x36,0x4c,0x00,0x5a,0x53,0xab,0x8c,0x26,0x78,0x2d,0x7e,0x8b,0xff,0x84,0xcc,0x23,0x23,0x48,0xc7,0xb9,0x70,0x17,0x10,0x3f,0x75,0xea,0x65,0x9e,0xbf,0x9a,0x6c,0x45,0x73,0x69,0x6d,0x80,0xa8,0x00,0x49,0xfc,0xb2,0x7f,0x25,0x50,0xb8,0xcf,0xc8,0x12,0xf4,0xac,0x2b,0x5b,0xbd,0xbf,0x0c,0xe0,0xe7,0xb3,0x0d,0x63,0x63,0x09,0xe2,0x3e,0xfc,0x66,0x3d,0x6b,0xcb,0xb5,0x61,0x7f,0x2c,0xd6,0x81,0x1a,0x3b,0x44,0x13,0x42,0x04,0xbe,0x0f,0xdb,0xa1,0xe1,0x21,0x19,0xec,0xa4,0x02},
+ {0xa2,0xb8,0x24,0x3b,0x9a,0x25,0xe6,0x5c,0xb8,0xa0,0xaf,0x45,0xcc,0x7a,0x57,0xb8,0x37,0x70,0xa0,0x8b,0xe8,0xe6,0xcb,0xcc,0xbf,0x09,0x78,0x12,0x51,0x3c,0x14,0x3d,0x5f,0x79,0xcf,0xf1,0x62,0x61,0xc8,0xf5,0xf2,0x57,0xee,0x26,0x19,0x86,0x8c,0x11,0x78,0x35,0x06,0x1c,0x85,0x24,0x21,0x17,0xcf,0x7f,0x06,0xec,0x5d,0x2b,0xd1,0x36,0x57,0x45,0x15,0x79,0x91,0x27,0x6d,0x12,0x0a,0x3a,0x78,0xfc,0x5c,0x8f,0xe4,0xd5,0xac,0x9b,0x17,0xdf,0xe8,0xb6,0xbd,0x36,0x59,0x28,0xa8,0x5b,0x88,0x17,0xf5,0x2e},
+ {0xdc,0xae,0x58,0x8c,0x4e,0x97,0x37,0x46,0xa4,0x41,0xf0,0xab,0xfb,0x22,0xef,0xb9,0x8a,0x71,0x80,0xe9,0x56,0xd9,0x85,0xe1,0xa6,0xa8,0x43,0xb1,0xfa,0x78,0x1b,0x2f,0x51,0x2f,0x5b,0x30,0xfb,0xbf,0xee,0x96,0xb8,0x96,0x95,0x88,0xad,0x38,0xf9,0xd3,0x25,0xdd,0xd5,0x46,0xc7,0x2d,0xf5,0xf0,0x95,0x00,0x3a,0xbb,0x90,0x82,0x96,0x57,0x01,0xe1,0x20,0x0a,0x43,0xb8,0x1a,0xf7,0x47,0xec,0xf0,0x24,0x8d,0x65,0x93,0xf3,0xd1,0xee,0xe2,0x6e,0xa8,0x09,0x75,0xcf,0xe1,0xa3,0x2a,0xdc,0x35,0x3e,0xc4,0x7d},
+ {0xc3,0xd9,0x7d,0x88,0x65,0x66,0x96,0x85,0x55,0x53,0xb0,0x4b,0x31,0x9b,0x0f,0xc9,0xb1,0x79,0x20,0xef,0xf8,0x8d,0xe0,0xc6,0x2f,0xc1,0x8c,0x75,0x16,0x20,0xf7,0x7e,0x18,0x97,0x3e,0x27,0x5c,0x2a,0x78,0x5a,0x94,0xfd,0x4e,0x5e,0x99,0xc6,0x76,0x35,0x3e,0x7d,0x23,0x1f,0x05,0xd8,0x2e,0x0f,0x99,0x0a,0xd5,0x82,0x1d,0xb8,0x4f,0x04,0xd9,0xe3,0x07,0xa9,0xc5,0x18,0xdf,0xc1,0x59,0x63,0x4c,0xce,0x1d,0x37,0xb3,0x57,0x49,0xbb,0x01,0xb2,0x34,0x45,0x70,0xca,0x2e,0xdd,0x30,0x9c,0x3f,0x82,0x79,0x7f},
+ {0xe8,0x13,0xb5,0xa3,0x39,0xd2,0x34,0x83,0xd8,0xa8,0x1f,0xb9,0xd4,0x70,0x36,0xc1,0x33,0xbd,0x90,0xf5,0x36,0x41,0xb5,0x12,0xb4,0xd9,0x84,0xd7,0x73,0x03,0x4e,0x0a,0xba,0x87,0xf5,0x68,0xf0,0x1f,0x9c,0x6a,0xde,0xc8,0x50,0x00,0x4e,0x89,0x27,0x08,0xe7,0x5b,0xed,0x7d,0x55,0x99,0xbf,0x3c,0xf0,0xd6,0x06,0x1c,0x43,0xb0,0xa9,0x64,0x19,0x29,0x7d,0x5b,0xa1,0xd6,0xb3,0x2e,0x35,0x82,0x3a,0xd5,0xa0,0xf6,0xb4,0xb0,0x47,0x5d,0xa4,0x89,0x43,0xce,0x56,0x71,0x6c,0x34,0x18,0xce,0x0a,0x7d,0x1a,0x07},
+ {0x0b,0xba,0x87,0xc8,0xaa,0x2d,0x07,0xd3,0xee,0x62,0xa5,0xbf,0x05,0x29,0x26,0x01,0x8b,0x76,0xef,0xc0,0x02,0x30,0x54,0xcf,0x9c,0x7e,0xea,0x46,0x71,0xcc,0x3b,0x2c,0x31,0x44,0xe1,0x20,0x52,0x35,0x0c,0xcc,0x41,0x51,0xb1,0x09,0x07,0x95,0x65,0x0d,0x36,0x5f,0x9d,0x20,0x1b,0x62,0xf5,0x9a,0xd3,0x55,0x77,0x61,0xf7,0xbc,0x69,0x7c,0x5f,0x29,0xe8,0x04,0xeb,0xd7,0xf0,0x07,0x7d,0xf3,0x50,0x2f,0x25,0x18,0xdb,0x10,0xd7,0x98,0x17,0x17,0xa3,0xa9,0x51,0xe9,0x1d,0xa5,0xac,0x22,0x73,0x9a,0x5a,0x6f},
+ {0xc5,0xc6,0x41,0x2f,0x0c,0x00,0xa1,0x8b,0x9b,0xfb,0xfe,0x0c,0xc1,0x79,0x9f,0xc4,0x9f,0x1c,0xc5,0x3c,0x70,0x47,0xfa,0x4e,0xca,0xaf,0x47,0xe1,0xa2,0x21,0x4e,0x49,0xbe,0x44,0xd9,0xa3,0xeb,0xd4,0x29,0xe7,0x9e,0xaf,0x78,0x80,0x40,0x09,0x9e,0x8d,0x03,0x9c,0x86,0x47,0x7a,0x56,0x25,0x45,0x24,0x3b,0x8d,0xee,0x80,0x96,0xab,0x02,0x9a,0x0d,0xe5,0xdd,0x85,0x8a,0xa4,0xef,0x49,0xa2,0xb9,0x0f,0x4e,0x22,0x9a,0x21,0xd9,0xf6,0x1e,0xd9,0x1d,0x1f,0x09,0xfa,0x34,0xbb,0x46,0xea,0xcb,0x76,0x5d,0x6b},
+ {0x94,0xd9,0x0c,0xec,0x6c,0x55,0x57,0x88,0xba,0x1d,0xd0,0x5c,0x6f,0xdc,0x72,0x64,0x77,0xb4,0x42,0x8f,0x14,0x69,0x01,0xaf,0x54,0x73,0x27,0x85,0xf6,0x33,0xe3,0x0a,0x22,0x25,0x78,0x1e,0x17,0x41,0xf9,0xe0,0xd3,0x36,0x69,0x03,0x74,0xae,0xe6,0xf1,0x46,0xc7,0xfc,0xd0,0xa2,0x3e,0x8b,0x40,0x3e,0x31,0xdd,0x03,0x9c,0x86,0xfb,0x16,0x62,0x09,0xb6,0x33,0x97,0x19,0x8e,0x28,0x33,0xe1,0xab,0xd8,0xb4,0x72,0xfc,0x24,0x3e,0xd0,0x91,0x09,0xed,0xf7,0x11,0x48,0x75,0xd0,0x70,0x8f,0x8b,0xe3,0x81,0x3f},
+ {0xfe,0xaf,0xd9,0x7e,0xcc,0x0f,0x91,0x7f,0x4b,0x87,0x65,0x24,0xa1,0xb8,0x5c,0x54,0x04,0x47,0x0c,0x4b,0xd2,0x7e,0x39,0xa8,0x93,0x09,0xf5,0x04,0xc1,0x0f,0x51,0x50,0x24,0xc8,0x17,0x5f,0x35,0x7f,0xdb,0x0a,0xa4,0x99,0x42,0xd7,0xc3,0x23,0xb9,0x74,0xf7,0xea,0xf8,0xcb,0x8b,0x3e,0x7c,0xd5,0x3d,0xdc,0xde,0x4c,0xd3,0xe2,0xd3,0x0a,0x9d,0x24,0x6e,0x33,0xc5,0x0f,0x0c,0x6f,0xd9,0xcf,0x31,0xc3,0x19,0xde,0x5e,0x74,0x1c,0xfe,0xee,0x09,0x00,0xfd,0xd6,0xf2,0xbe,0x1e,0xfa,0xf0,0x8b,0x15,0x7c,0x12},
+ {0xa2,0x79,0x98,0x2e,0x42,0x7c,0x19,0xf6,0x47,0x36,0xca,0x52,0xd4,0xdd,0x4a,0xa4,0xcb,0xac,0x4e,0x4b,0xc1,0x3f,0x41,0x9b,0x68,0x4f,0xef,0x07,0x7d,0xf8,0x4e,0x35,0x74,0xb9,0x51,0xae,0xc4,0x8f,0xa2,0xde,0x96,0xfe,0x4d,0x74,0xd3,0x73,0x99,0x1d,0xa8,0x48,0x38,0x87,0x0b,0x68,0x40,0x62,0x95,0xdf,0x67,0xd1,0x79,0x24,0xd8,0x4e,0x75,0xd9,0xc5,0x60,0x22,0xb5,0xe3,0xfe,0xb8,0xb0,0x41,0xeb,0xfc,0x2e,0x35,0x50,0x3c,0x65,0xf6,0xa9,0x30,0xac,0x08,0x88,0x6d,0x23,0x39,0x05,0xd2,0x92,0x2d,0x30},
+ {0x3d,0x28,0xa4,0xbc,0xa2,0xc1,0x13,0x78,0xd9,0x3d,0x86,0xa1,0x91,0xf0,0x62,0xed,0x86,0xfa,0x68,0xc2,0xb8,0xbc,0xc7,0xae,0x4c,0xae,0x1c,0x6f,0xb7,0xd3,0xe5,0x10,0x77,0xf1,0xe0,0xe4,0xb6,0x6f,0xbc,0x2d,0x93,0x6a,0xbd,0xa4,0x29,0xbf,0xe1,0x04,0xe8,0xf6,0x7a,0x78,0xd4,0x66,0x19,0x5e,0x60,0xd0,0x26,0xb4,0x5e,0x5f,0xdc,0x0e,0x67,0x8e,0xda,0x53,0xd6,0xbf,0x53,0x54,0x41,0xf6,0xa9,0x24,0xec,0x1e,0xdc,0xe9,0x23,0x8a,0x57,0x03,0x3b,0x26,0x87,0xbf,0x72,0xba,0x1c,0x36,0x51,0x6c,0xb4,0x45},
+ {0xa1,0x7f,0x4f,0x31,0xbf,0x2a,0x40,0xa9,0x50,0xf4,0x8c,0x8e,0xdc,0xf1,0x57,0xe2,0x84,0xbe,0xa8,0x23,0x4b,0xd5,0xbb,0x1d,0x3b,0x71,0xcb,0x6d,0xa3,0xbf,0x77,0x21,0xe4,0xe3,0x7f,0x8a,0xdd,0x4d,0x9d,0xce,0x30,0x0e,0x62,0x76,0x56,0x64,0x13,0xab,0x58,0x99,0x0e,0xb3,0x7b,0x4f,0x59,0x4b,0xdf,0x29,0x12,0x32,0xef,0x0a,0x1c,0x5c,0x8f,0xdb,0x79,0xfa,0xbc,0x1b,0x08,0x37,0xb3,0x59,0x5f,0xc2,0x1e,0x81,0x48,0x60,0x87,0x24,0x83,0x9c,0x65,0x76,0x7a,0x08,0xbb,0xb5,0x8a,0x7d,0x38,0x19,0xe6,0x4a},
+ {0x2e,0xa3,0x44,0x53,0xaa,0xf6,0xdb,0x8d,0x78,0x40,0x1b,0xb4,0xb4,0xea,0x88,0x7d,0x60,0x0d,0x13,0x4a,0x97,0xeb,0xb0,0x5e,0x03,0x3e,0xbf,0x17,0x1b,0xd9,0x00,0x1a,0x83,0xfb,0x5b,0x98,0x44,0x7e,0x11,0x61,0x36,0x31,0x96,0x71,0x2a,0x46,0xe0,0xfc,0x4b,0x90,0x25,0xd4,0x48,0x34,0xac,0x83,0x64,0x3d,0xa4,0x5b,0xbe,0x5a,0x68,0x75,0xb2,0xf2,0x61,0xeb,0x33,0x09,0x96,0x6e,0x52,0x49,0xff,0xc9,0xa8,0x0f,0x3d,0x54,0x69,0x65,0xf6,0x7a,0x10,0x75,0x72,0xdf,0xaa,0xe6,0xb0,0x23,0xb6,0x29,0x55,0x13},
+ {0x18,0xd5,0xd1,0xad,0xd7,0xdb,0xf0,0x18,0x11,0x1f,0xc1,0xcf,0x88,0x78,0x9f,0x97,0x9b,0x75,0x14,0x71,0xf0,0xe1,0x32,0x87,0x01,0x3a,0xca,0x65,0x1a,0xb8,0xb5,0x79,0xfe,0x83,0x2e,0xe2,0xbc,0x16,0xc7,0xf5,0xc1,0x85,0x09,0xe8,0x19,0xeb,0x2b,0xb4,0xae,0x4a,0x25,0x14,0x37,0xa6,0x9d,0xec,0x13,0xa6,0x90,0x15,0x05,0xea,0x72,0x59,0x11,0x78,0x8f,0xdc,0x20,0xac,0xd4,0x0f,0xa8,0x4f,0x4d,0xac,0x94,0xd2,0x9a,0x9a,0x34,0x04,0x36,0xb3,0x64,0x2d,0x1b,0xc0,0xdb,0x3b,0x5f,0x90,0x95,0x9c,0x7e,0x4f},
+ {0x2e,0x30,0x81,0x57,0xbc,0x4b,0x67,0x62,0x0f,0xdc,0xad,0x89,0x39,0x0f,0x52,0xd8,0xc6,0xd9,0xfb,0x53,0xae,0x99,0x29,0x8c,0x4c,0x8e,0x63,0x2e,0xd9,0x3a,0x99,0x31,0xfe,0x99,0x52,0x35,0x3d,0x44,0xc8,0x71,0xd7,0xea,0xeb,0xdb,0x1c,0x3b,0xcd,0x8b,0x66,0x94,0xa4,0xf1,0x9e,0x49,0x92,0x80,0xc8,0xad,0x44,0xa1,0xc4,0xee,0x42,0x19,0x92,0x49,0x23,0xae,0x19,0x53,0xac,0x7d,0x92,0x3e,0xea,0x0c,0x91,0x3d,0x1b,0x2c,0x22,0x11,0x3c,0x25,0x94,0xe4,0x3c,0x55,0x75,0xca,0xf9,0x4e,0x31,0x65,0x0a,0x2a},
+ {0xc2,0x27,0xf9,0xf7,0x7f,0x93,0xb7,0x2d,0x35,0xa6,0xd0,0x17,0x06,0x1f,0x74,0xdb,0x76,0xaf,0x55,0x11,0xa2,0xf3,0x82,0x59,0xed,0x2d,0x7c,0x64,0x18,0xe2,0xf6,0x4c,0x3a,0x79,0x1c,0x3c,0xcd,0x1a,0x36,0xcf,0x3b,0xbc,0x35,0x5a,0xac,0xbc,0x9e,0x2f,0xab,0xa6,0xcd,0xa8,0xe9,0x60,0xe8,0x60,0x13,0x1a,0xea,0x6d,0x9b,0xc3,0x5d,0x05,0xb6,0x5b,0x8d,0xc2,0x7c,0x22,0x19,0xb1,0xab,0xff,0x4d,0x77,0xbc,0x4e,0xe2,0x07,0x89,0x2c,0xa3,0xe4,0xce,0x78,0x3c,0xa8,0xb6,0x24,0xaa,0x10,0x77,0x30,0x1a,0x12},
+ {0x97,0x4a,0x03,0x9f,0x5e,0x5d,0xdb,0xe4,0x2d,0xbc,0x34,0x30,0x09,0xfc,0x53,0xe1,0xb1,0xd3,0x51,0x95,0x91,0x46,0x05,0x46,0x2d,0xe5,0x40,0x7a,0x6c,0xc7,0x3f,0x33,0xc9,0x83,0x74,0xc7,0x3e,0x71,0x59,0xd6,0xaf,0x96,0x2b,0xb8,0x77,0xe0,0xbf,0x88,0xd3,0xbc,0x97,0x10,0x23,0x28,0x9e,0x28,0x9b,0x3a,0xed,0x6c,0x4a,0xb9,0x7b,0x52,0x2e,0x48,0x5b,0x99,0x2a,0x99,0x3d,0x56,0x01,0x38,0x38,0x6e,0x7c,0xd0,0x05,0x34,0xe5,0xd8,0x64,0x2f,0xde,0x35,0x50,0x48,0xf7,0xa9,0xa7,0x20,0x9b,0x06,0x89,0x6b},
+ {0x0d,0x22,0x70,0x62,0x41,0xa0,0x2a,0x81,0x4e,0x5b,0x24,0xf9,0xfa,0x89,0x5a,0x99,0x05,0xef,0x72,0x50,0xce,0xc4,0xad,0xff,0x73,0xeb,0x73,0xaa,0x03,0x21,0xbc,0x23,0x77,0xdb,0xc7,0xb5,0x8c,0xfa,0x82,0x40,0x55,0xc1,0x34,0xc7,0xf8,0x86,0x86,0x06,0x7e,0xa5,0xe7,0xf6,0xd9,0xc8,0xe6,0x29,0xcf,0x9b,0x63,0xa7,0x08,0xd3,0x73,0x04,0x05,0x9e,0x58,0x03,0x26,0x79,0xee,0xca,0x92,0xc4,0xdc,0x46,0x12,0x42,0x4b,0x2b,0x4f,0xa9,0x01,0xe6,0x74,0xef,0xa1,0x02,0x1a,0x34,0x04,0xde,0xbf,0x73,0x2f,0x10},
+ {0xc6,0x45,0x57,0x7f,0xab,0xb9,0x18,0xeb,0x90,0xc6,0x87,0x57,0xee,0x8a,0x3a,0x02,0xa9,0xaf,0xf7,0x2d,0xda,0x12,0x27,0xb7,0x3d,0x01,0x5c,0xea,0x25,0x7d,0x59,0x36,0x9a,0x1c,0x51,0xb5,0xe0,0xda,0xb4,0xa2,0x06,0xff,0xff,0x2b,0x29,0x60,0xc8,0x7a,0x34,0x42,0x50,0xf5,0x5d,0x37,0x1f,0x98,0x2d,0xa1,0x4e,0xda,0x25,0xd7,0x6b,0x3f,0xac,0x58,0x60,0x10,0x7b,0x8d,0x4d,0x73,0x5f,0x90,0xc6,0x6f,0x9e,0x57,0x40,0xd9,0x2d,0x93,0x02,0x92,0xf9,0xf8,0x66,0x64,0xd0,0xd6,0x60,0xda,0x19,0xcc,0x7e,0x7b},
+ {0x0d,0x69,0x5c,0x69,0x3c,0x37,0xc2,0x78,0x6e,0x90,0x42,0x06,0x66,0x2e,0x25,0xdd,0xd2,0x2b,0xe1,0x4a,0x44,0x44,0x1d,0x95,0x56,0x39,0x74,0x01,0x76,0xad,0x35,0x42,0x9b,0xfa,0x7c,0xa7,0x51,0x4a,0xae,0x6d,0x50,0x86,0xa3,0xe7,0x54,0x36,0x26,0x82,0xdb,0x82,0x2d,0x8f,0xcd,0xff,0xbb,0x09,0xba,0xca,0xf5,0x1b,0x66,0xdc,0xbe,0x03,0xf5,0x75,0x89,0x07,0x0d,0xcb,0x58,0x62,0x98,0xf2,0x89,0x91,0x54,0x42,0x29,0x49,0xe4,0x6e,0xe3,0xe2,0x23,0xb4,0xca,0xa0,0xa1,0x66,0xf0,0xcd,0xb0,0xe2,0x7c,0x0e},
+ {0xa3,0x85,0x8c,0xc4,0x3a,0x64,0x94,0xc4,0xad,0x39,0x61,0x3c,0xf4,0x1d,0x36,0xfd,0x48,0x4d,0xe9,0x3a,0xdd,0x17,0xdb,0x09,0x4a,0x67,0xb4,0x8f,0x5d,0x0a,0x6e,0x66,0xf9,0x70,0x4b,0xd9,0xdf,0xfe,0xa6,0xfe,0x2d,0xba,0xfc,0xc1,0x51,0xc0,0x30,0xf1,0x89,0xab,0x2f,0x7f,0x7e,0xd4,0x82,0x48,0xb5,0xee,0xec,0x8a,0x13,0x56,0x52,0x61,0x0d,0xcb,0x70,0x48,0x4e,0xf6,0xbb,0x2a,0x6b,0x8b,0x45,0xaa,0xf0,0xbc,0x65,0xcd,0x5d,0x98,0xe8,0x75,0xba,0x4e,0xbe,0x9a,0xe4,0xde,0x14,0xd5,0x10,0xc8,0x0b,0x7f},
+ {0x6f,0x13,0xf4,0x26,0xa4,0x6b,0x00,0xb9,0x35,0x30,0xe0,0x57,0x9e,0x36,0x67,0x8d,0x28,0x3c,0x46,0x4f,0xd9,0xdf,0xc8,0xcb,0xf5,0xdb,0xee,0xf8,0xbc,0x8d,0x1f,0x0d,0xa0,0x13,0x72,0x73,0xad,0x9d,0xac,0x83,0x98,0x2e,0xf7,0x2e,0xba,0xf8,0xf6,0x9f,0x57,0x69,0xec,0x43,0xdd,0x2e,0x1e,0x31,0x75,0xab,0xc5,0xde,0x7d,0x90,0x3a,0x1d,0xdc,0x81,0xd0,0x3e,0x31,0x93,0x16,0xba,0x80,0x34,0x1b,0x85,0xad,0x9f,0x32,0x29,0xcb,0x21,0x03,0x03,0x3c,0x01,0x28,0x01,0xe3,0xfd,0x1b,0xa3,0x44,0x1b,0x01,0x00},
+ {0x0c,0x6c,0xc6,0x3f,0x6c,0xa0,0xdf,0x3f,0xd2,0x0d,0xd6,0x4d,0x8e,0xe3,0x40,0x5d,0x71,0x4d,0x8e,0x26,0x38,0x8b,0xe3,0x7a,0xe1,0x57,0x83,0x6e,0x91,0x8d,0xc4,0x3a,0x5c,0xa7,0x0a,0x6a,0x69,0x1f,0x56,0x16,0x6a,0xbd,0x52,0x58,0x5c,0x72,0xbf,0xc1,0xad,0x66,0x79,0x9a,0x7f,0xdd,0xa8,0x11,0x26,0x10,0x85,0xd2,0xa2,0x88,0xd9,0x63,0x2e,0x23,0xbd,0xaf,0x53,0x07,0x12,0x00,0x83,0xf6,0xd8,0xfd,0xb8,0xce,0x2b,0xe9,0x91,0x2b,0xe7,0x84,0xb3,0x69,0x16,0xf8,0x66,0xa0,0x68,0x23,0x2b,0xd5,0xfa,0x33},
+ {0x16,0x1e,0xe4,0xc5,0xc6,0x49,0x06,0x54,0x35,0x77,0x3f,0x33,0x30,0x64,0xf8,0x0a,0x46,0xe7,0x05,0xf3,0xd2,0xfc,0xac,0xb2,0xa7,0xdc,0x56,0xa2,0x29,0xf4,0xc0,0x16,0xe8,0xcf,0x22,0xc4,0xd0,0xc8,0x2c,0x8d,0xcb,0x3a,0xa1,0x05,0x7b,0x4f,0x2b,0x07,0x6f,0xa5,0xf6,0xec,0xe6,0xb6,0xfe,0xa3,0xe2,0x71,0x0a,0xb9,0xcc,0x55,0xc3,0x3c,0x31,0x91,0x3e,0x90,0x43,0x94,0xb6,0xe9,0xce,0x37,0x56,0x7a,0xcb,0x94,0xa4,0xb8,0x44,0x92,0xba,0xba,0xa4,0xd1,0x7c,0xc8,0x68,0x75,0xae,0x6b,0x42,0xaf,0x1e,0x63},
+ {0x9f,0xfe,0x66,0xda,0x10,0x04,0xe9,0xb3,0xa6,0xe5,0x16,0x6c,0x52,0x4b,0xdd,0x85,0x83,0xbf,0xf9,0x1e,0x61,0x97,0x3d,0xbc,0xb5,0x19,0xa9,0x1e,0x8b,0x64,0x99,0x55,0xe8,0x0d,0x70,0xa3,0xb9,0x75,0xd9,0x47,0x52,0x05,0xf8,0xe2,0xfb,0xc5,0x80,0x72,0xe1,0x5d,0xe4,0x32,0x27,0x8f,0x65,0x53,0xb5,0x80,0x5f,0x66,0x7f,0x2c,0x1f,0x43,0x19,0x7b,0x8f,0x85,0x44,0x63,0x02,0xd6,0x4a,0x51,0xea,0xa1,0x2f,0x35,0xab,0x14,0xd7,0xa9,0x90,0x20,0x1a,0x44,0x00,0x89,0x26,0x3b,0x25,0x91,0x5f,0x71,0x04,0x7b},
+ {0x43,0xae,0xf6,0xac,0x28,0xbd,0xed,0x83,0xb4,0x7a,0x5c,0x7d,0x8b,0x7c,0x35,0x86,0x44,0x2c,0xeb,0xb7,0x69,0x47,0x40,0xc0,0x3f,0x58,0xf6,0xc2,0xf5,0x7b,0xb3,0x59,0xc6,0xba,0xe6,0xc4,0x80,0xc2,0x76,0xb3,0x0b,0x9b,0x1d,0x6d,0xdd,0xd3,0x0e,0x97,0x44,0xf9,0x0b,0x45,0x58,0x95,0x9a,0xb0,0x23,0xe2,0xcd,0x57,0xfa,0xac,0xd0,0x48,0x71,0xe6,0xab,0x7d,0xe4,0x26,0x0f,0xb6,0x37,0x3a,0x2f,0x62,0x97,0xa1,0xd1,0xf1,0x94,0x03,0x96,0xe9,0x7e,0xce,0x08,0x42,0xdb,0x3b,0x6d,0x33,0x91,0x41,0x23,0x16},
+ {0xf6,0x7f,0x26,0xf6,0xde,0x99,0xe4,0xb9,0x43,0x08,0x2c,0x74,0x7b,0xca,0x72,0x77,0xb1,0xf2,0xa4,0xe9,0x3f,0x15,0xa0,0x23,0x06,0x50,0xd0,0xd5,0xec,0xdf,0xdf,0x2c,0x40,0x86,0xf3,0x1f,0xd6,0x9c,0x49,0xdd,0xa0,0x25,0x36,0x06,0xc3,0x9b,0xcd,0x29,0xc3,0x3d,0xd7,0x3d,0x02,0xd8,0xe2,0x51,0x31,0x92,0x3b,0x20,0x7a,0x70,0x25,0x4a,0x6a,0xed,0xf6,0x53,0x8a,0x66,0xb7,0x2a,0xa1,0x70,0xd1,0x1d,0x58,0x42,0x42,0x30,0x61,0x01,0xe2,0x3a,0x4c,0x14,0x00,0x40,0xfc,0x49,0x8e,0x24,0x6d,0x89,0x21,0x57},
+ {0xae,0x1b,0x18,0xfd,0x17,0x55,0x6e,0x0b,0xb4,0x63,0xb9,0x2b,0x9f,0x62,0x22,0x90,0x25,0x46,0x06,0x32,0xe9,0xbc,0x09,0x55,0xda,0x13,0x3c,0xf6,0x74,0xdd,0x8e,0x57,0x4e,0xda,0xd0,0xa1,0x91,0x50,0x5d,0x28,0x08,0x3e,0xfe,0xb5,0xa7,0x6f,0xaa,0x4b,0xb3,0x93,0x93,0xe1,0x7c,0x17,0xe5,0x63,0xfd,0x30,0xb0,0xc4,0xaf,0x35,0xc9,0x03,0x3d,0x0c,0x2b,0x49,0xc6,0x76,0x72,0x99,0xfc,0x05,0xe2,0xdf,0xc4,0xc2,0xcc,0x47,0x3c,0x3a,0x62,0xdd,0x84,0x9b,0xd2,0xdc,0xa2,0xc7,0x88,0x02,0x59,0xab,0xc2,0x3e},
+ {0xb9,0x7b,0xd8,0xe4,0x7b,0xd2,0xa0,0xa1,0xed,0x1a,0x39,0x61,0xeb,0x4d,0x8b,0xa9,0x83,0x9b,0xcb,0x73,0xd0,0xdd,0xa0,0x99,0xce,0xca,0x0f,0x20,0x5a,0xc2,0xd5,0x2d,0xcb,0xd1,0x32,0xae,0x09,0x3a,0x21,0xa7,0xd5,0xc2,0xf5,0x40,0xdf,0x87,0x2b,0x0f,0x29,0xab,0x1e,0xe8,0xc6,0xa4,0xae,0x0b,0x5e,0xac,0xdb,0x6a,0x6c,0xf6,0x1b,0x0e,0x7e,0x88,0x2c,0x79,0xe9,0xd5,0xab,0xe2,0x5d,0x6d,0x92,0xcb,0x18,0x00,0x02,0x1a,0x1e,0x5f,0xae,0xba,0xcd,0x69,0xba,0xbf,0x5f,0x8f,0xe8,0x5a,0xb3,0x48,0x05,0x73},
+ {0xee,0xb8,0xa8,0xcb,0xa3,0x51,0x35,0xc4,0x16,0x5f,0x11,0xb2,0x1d,0x6f,0xa2,0x65,0x50,0x38,0x8c,0xab,0x52,0x4f,0x0f,0x76,0xca,0xb8,0x1d,0x41,0x3b,0x44,0x43,0x30,0x34,0xe3,0xd6,0xa1,0x4b,0x09,0x5b,0x80,0x19,0x3f,0x35,0x09,0x77,0xf1,0x3e,0xbf,0x2b,0x70,0x22,0x06,0xcb,0x06,0x3f,0x42,0xdd,0x45,0x78,0xd8,0x77,0x22,0x5a,0x58,0x62,0x89,0xd4,0x33,0x82,0x5f,0x8a,0xa1,0x7f,0x25,0x78,0xec,0xb5,0xc4,0x98,0x66,0xff,0x41,0x3e,0x37,0xa5,0x6f,0x8e,0xa7,0x1f,0x98,0xef,0x50,0x89,0x27,0x56,0x76},
+ {0xc0,0xc8,0x1f,0xd5,0x59,0xcf,0xc3,0x38,0xf2,0xb6,0x06,0x05,0xfd,0xd2,0xed,0x9b,0x8f,0x0e,0x57,0xab,0x9f,0x10,0xbf,0x26,0xa6,0x46,0xb8,0xc1,0xa8,0x60,0x41,0x3f,0x9d,0xcf,0x86,0xea,0xa3,0x73,0x70,0xe1,0xdc,0x5f,0x15,0x07,0xb7,0xfb,0x8c,0x3a,0x8e,0x8a,0x83,0x31,0xfc,0xe7,0x53,0x48,0x16,0xf6,0x13,0xb6,0x84,0xf4,0xbb,0x28,0x7c,0x6c,0x13,0x6f,0x5c,0x2f,0x61,0xf2,0xbe,0x11,0xdd,0xf6,0x07,0xd1,0xea,0xaf,0x33,0x6f,0xde,0x13,0xd2,0x9a,0x7e,0x52,0x5d,0xf7,0x88,0x81,0x35,0xcb,0x79,0x1e},
+ {0xf1,0xe3,0xf7,0xee,0xc3,0x36,0x34,0x01,0xf8,0x10,0x9e,0xfe,0x7f,0x6a,0x8b,0x82,0xfc,0xde,0xf9,0xbc,0xe5,0x08,0xf9,0x7f,0x31,0x38,0x3b,0x3a,0x1b,0x95,0xd7,0x65,0x81,0x81,0xe0,0xf5,0xd8,0x53,0xe9,0x77,0xd9,0xde,0x9d,0x29,0x44,0x0c,0xa5,0x84,0xe5,0x25,0x45,0x86,0x0c,0x2d,0x6c,0xdc,0xf4,0xf2,0xd1,0x39,0x2d,0xb5,0x8a,0x47,0x59,0xd1,0x52,0x92,0xd3,0xa4,0xa6,0x66,0x07,0xc8,0x1a,0x87,0xbc,0xe1,0xdd,0xe5,0x6f,0xc9,0xc1,0xa6,0x40,0x6b,0x2c,0xb8,0x14,0x22,0x21,0x1a,0x41,0x7a,0xd8,0x16},
+ {0x15,0x62,0x06,0x42,0x5a,0x7e,0xbd,0xb3,0xc1,0x24,0x5a,0x0c,0xcd,0xe3,0x9b,0x87,0xb7,0x94,0xf9,0xd6,0xb1,0x5d,0xc0,0x57,0xa6,0x8c,0xf3,0x65,0x81,0x7c,0xf8,0x28,0x83,0x05,0x4e,0xd5,0xe2,0xd5,0xa4,0xfb,0xfa,0x99,0xbd,0x2e,0xd7,0xaf,0x1f,0xe2,0x8f,0x77,0xe9,0x6e,0x73,0xc2,0x7a,0x49,0xde,0x6d,0x5a,0x7a,0x57,0x0b,0x99,0x1f,0xd6,0xf7,0xe8,0x1b,0xad,0x4e,0x34,0xa3,0x8f,0x79,0xea,0xac,0xeb,0x50,0x1e,0x7d,0x52,0xe0,0x0d,0x52,0x9e,0x56,0xc6,0x77,0x3e,0x6d,0x4d,0x53,0xe1,0x2f,0x88,0x45},
+ {0xd6,0x83,0x79,0x75,0x5d,0x34,0x69,0x66,0xa6,0x11,0xaa,0x17,0x11,0xed,0xb6,0x62,0x8f,0x12,0x5e,0x98,0x57,0x18,0xdd,0x7d,0xdd,0xf6,0x26,0xf6,0xb8,0xe5,0x8f,0x68,0xe4,0x6f,0x3c,0x94,0x29,0x99,0xac,0xd8,0xa2,0x92,0x83,0xa3,0x61,0xf1,0xf9,0xb5,0xf3,0x9a,0xc8,0xbe,0x13,0xdb,0x99,0x26,0x74,0xf0,0x05,0xe4,0x3c,0x84,0xcf,0x7d,0xc0,0x32,0x47,0x4a,0x48,0xd6,0x90,0x6c,0x99,0x32,0x56,0xca,0xfd,0x43,0x21,0xd5,0xe1,0xc6,0x5d,0x91,0xc3,0x28,0xbe,0xb3,0x1b,0x19,0x27,0x73,0x7e,0x68,0x39,0x67},
+ {0xa6,0x75,0x56,0x38,0x14,0x20,0x78,0xef,0xe8,0xa9,0xfd,0xaa,0x30,0x9f,0x64,0xa2,0xcb,0xa8,0xdf,0x5c,0x50,0xeb,0xd1,0x4c,0xb3,0xc0,0x4d,0x1d,0xba,0x5a,0x11,0x46,0xc0,0x1a,0x0c,0xc8,0x9d,0xcc,0x6d,0xa6,0x36,0xa4,0x38,0x1b,0xf4,0x5c,0xa0,0x97,0xc6,0xd7,0xdb,0x95,0xbe,0xf3,0xeb,0xa7,0xab,0x7d,0x7e,0x8d,0xf6,0xb8,0xa0,0x7d,0x76,0xda,0xb5,0xc3,0x53,0x19,0x0f,0xd4,0x9b,0x9e,0x11,0x21,0x73,0x6f,0xac,0x1d,0x60,0x59,0xb2,0xfe,0x21,0x60,0xcc,0x03,0x4b,0x4b,0x67,0x83,0x7e,0x88,0x5f,0x5a},
+ {0x11,0x3d,0xa1,0x70,0xcf,0x01,0x63,0x8f,0xc4,0xd0,0x0d,0x35,0x15,0xb8,0xce,0xcf,0x7e,0xa4,0xbc,0xa4,0xd4,0x97,0x02,0xf7,0x34,0x14,0x4d,0xe4,0x56,0xb6,0x69,0x36,0xb9,0x43,0xa6,0xa0,0xd3,0x28,0x96,0x9e,0x64,0x20,0xc3,0xe6,0x00,0xcb,0xc3,0xb5,0x32,0xec,0x2d,0x7c,0x89,0x02,0x53,0x9b,0x0c,0xc7,0xd1,0xd5,0xe2,0x7a,0xe3,0x43,0x33,0xe1,0xa6,0xed,0x06,0x3f,0x7e,0x38,0xc0,0x3a,0xa1,0x99,0x51,0x1d,0x30,0x67,0x11,0x38,0x26,0x36,0xf8,0xd8,0x5a,0xbd,0xbe,0xe9,0xd5,0x4f,0xcd,0xe6,0x21,0x6a},
+ {0x5f,0xe6,0x46,0x30,0x0a,0x17,0xc6,0xf1,0x24,0x35,0xd2,0x00,0x2a,0x2a,0x71,0x58,0x55,0xb7,0x82,0x8c,0x3c,0xbd,0xdb,0x69,0x57,0xff,0x95,0xa1,0xf1,0xf9,0x6b,0x58,0xe3,0xb2,0x99,0x66,0x12,0x29,0x41,0xef,0x01,0x13,0x8d,0x70,0x47,0x08,0xd3,0x71,0xbd,0xb0,0x82,0x11,0xd0,0x32,0x54,0x32,0x36,0x8b,0x1e,0x00,0x07,0x1b,0x37,0x45,0x0b,0x79,0xf8,0x5e,0x8d,0x08,0xdb,0xa6,0xe5,0x37,0x09,0x61,0xdc,0xf0,0x78,0x52,0xb8,0x6e,0xa1,0x61,0xd2,0x49,0x03,0xac,0x79,0x21,0xe5,0x90,0x37,0xb0,0xaf,0x0e},
+ {0x2f,0x04,0x48,0x37,0xc1,0x55,0x05,0x96,0x11,0xaa,0x0b,0x82,0xe6,0x41,0x9a,0x21,0x0c,0x6d,0x48,0x73,0x38,0xf7,0x81,0x1c,0x61,0xc6,0x02,0x5a,0x67,0xcc,0x9a,0x30,0x1d,0xae,0x75,0x0f,0x5e,0x80,0x40,0x51,0x30,0xcc,0x62,0x26,0xe3,0xfb,0x02,0xec,0x6d,0x39,0x92,0xea,0x1e,0xdf,0xeb,0x2c,0xb3,0x5b,0x43,0xc5,0x44,0x33,0xae,0x44,0xee,0x43,0xa5,0xbb,0xb9,0x89,0xf2,0x9c,0x42,0x71,0xc9,0x5a,0x9d,0x0e,0x76,0xf3,0xaa,0x60,0x93,0x4f,0xc6,0xe5,0x82,0x1d,0x8f,0x67,0x94,0x7f,0x1b,0x22,0xd5,0x62},
+ {0x6d,0x93,0xd0,0x18,0x9c,0x29,0x4c,0x52,0x0c,0x1a,0x0c,0x8a,0x6c,0xb5,0x6b,0xc8,0x31,0x86,0x4a,0xdb,0x2e,0x05,0x75,0xa3,0x62,0x45,0x75,0xbc,0xe4,0xfd,0x0e,0x5c,0x3c,0x7a,0xf7,0x3a,0x26,0xd4,0x85,0x75,0x4d,0x14,0xe9,0xfe,0x11,0x7b,0xae,0xdf,0x3d,0x19,0xf7,0x59,0x80,0x70,0x06,0xa5,0x37,0x20,0x92,0x83,0x53,0x9a,0xf2,0x14,0xf5,0xd7,0xb2,0x25,0xdc,0x7e,0x71,0xdf,0x40,0x30,0xb5,0x99,0xdb,0x70,0xf9,0x21,0x62,0x4c,0xed,0xc3,0xb7,0x34,0x92,0xda,0x3e,0x09,0xee,0x7b,0x5c,0x36,0x72,0x5e},
+ {0x7f,0x21,0x71,0x45,0x07,0xfc,0x5b,0x57,0x5b,0xd9,0x94,0x06,0x5d,0x67,0x79,0x37,0x33,0x1e,0x19,0xf4,0xbb,0x37,0x0a,0x9a,0xbc,0xea,0xb4,0x47,0x4c,0x10,0xf1,0x77,0x3e,0xb3,0x08,0x2f,0x06,0x39,0x93,0x7d,0xbe,0x32,0x9f,0xdf,0xe5,0x59,0x96,0x5b,0xfd,0xbd,0x9e,0x1f,0xad,0x3d,0xff,0xac,0xb7,0x49,0x73,0xcb,0x55,0x05,0xb2,0x70,0x4c,0x2c,0x11,0x55,0xc5,0x13,0x51,0xbe,0xcd,0x1f,0x88,0x9a,0x3a,0x42,0x88,0x66,0x47,0x3b,0x50,0x5e,0x85,0x77,0x66,0x44,0x4a,0x40,0x06,0x4a,0x8f,0x39,0x34,0x0e},
+ {0xe8,0xbd,0xce,0x3e,0xd9,0x22,0x7d,0xb6,0x07,0x2f,0x82,0x27,0x41,0xe8,0xb3,0x09,0x8d,0x6d,0x5b,0xb0,0x1f,0xa6,0x3f,0x74,0x72,0x23,0x36,0x8a,0x36,0x05,0x54,0x5e,0x28,0x19,0x4b,0x3e,0x09,0x0b,0x93,0x18,0x40,0xf6,0xf3,0x73,0x0e,0xe1,0xe3,0x7d,0x6f,0x5d,0x39,0x73,0xda,0x17,0x32,0xf4,0x3e,0x9c,0x37,0xca,0xd6,0xde,0x8a,0x6f,0x9a,0xb2,0xb7,0xfd,0x3d,0x12,0x40,0xe3,0x91,0xb2,0x1a,0xa2,0xe1,0x97,0x7b,0x48,0x9e,0x94,0xe6,0xfd,0x02,0x7d,0x96,0xf9,0x97,0xde,0xd3,0xc8,0x2e,0xe7,0x0d,0x78},
+ {0xbc,0xe7,0x9a,0x08,0x45,0x85,0xe2,0x0a,0x06,0x4d,0x7f,0x1c,0xcf,0xde,0x8d,0x38,0xb8,0x11,0x48,0x0a,0x51,0x15,0xac,0x38,0xe4,0x8c,0x92,0x71,0xf6,0x8b,0xb2,0x0e,0x72,0x27,0xf4,0x00,0xf3,0xea,0x1f,0x67,0xaa,0x41,0x8c,0x2a,0x2a,0xeb,0x72,0x8f,0x92,0x32,0x37,0x97,0xd7,0x7f,0xa1,0x29,0xa6,0x87,0xb5,0x32,0xad,0xc6,0xef,0x1d,0xa7,0x95,0x51,0xef,0x1a,0xbe,0x5b,0xaf,0xed,0x15,0x7b,0x91,0x77,0x12,0x8c,0x14,0x2e,0xda,0xe5,0x7a,0xfb,0xf7,0x91,0x29,0x67,0x28,0xdd,0xf8,0x1b,0x20,0x7d,0x46},
+ {0xad,0x4f,0xef,0x74,0x9a,0x91,0xfe,0x95,0xa2,0x08,0xa3,0xf6,0xec,0x7b,0x82,0x3a,0x01,0x7b,0xa4,0x09,0xd3,0x01,0x4e,0x96,0x97,0xc7,0xa3,0x5b,0x4f,0x3c,0xc4,0x71,0xa9,0xe7,0x7a,0x56,0xbd,0xf4,0x1e,0xbc,0xbd,0x98,0x44,0xd6,0xb2,0x4c,0x62,0x3f,0xc8,0x4e,0x1f,0x2c,0xd2,0x64,0x10,0xe4,0x01,0x40,0x38,0xba,0xa5,0xc5,0xf9,0x2e,0xcd,0x74,0x9e,0xfa,0xf6,0x6d,0xfd,0xb6,0x7a,0x26,0xaf,0xe4,0xbc,0x78,0x82,0xf1,0x0e,0x99,0xef,0xf1,0xd0,0xb3,0x55,0x82,0x93,0xf2,0xc5,0x90,0xa3,0x8c,0x75,0x5a},
+ {0x95,0x24,0x46,0xd9,0x10,0x27,0xb7,0xa2,0x03,0x50,0x7d,0xd5,0xd2,0xc6,0xa8,0x3a,0xca,0x87,0xb4,0xa0,0xbf,0x00,0xd4,0xe3,0xec,0x72,0xeb,0xb3,0x44,0xe2,0xba,0x2d,0x94,0xdc,0x61,0x1d,0x8b,0x91,0xe0,0x8c,0x66,0x30,0x81,0x9a,0x46,0x36,0xed,0x8d,0xd3,0xaa,0xe8,0xaf,0x29,0xa8,0xe6,0xd4,0x3f,0xd4,0x39,0xf6,0x27,0x80,0x73,0x0a,0xcc,0xe1,0xff,0x57,0x2f,0x4a,0x0f,0x98,0x43,0x98,0x83,0xe1,0x0d,0x0d,0x67,0x00,0xfd,0x15,0xfb,0x49,0x4a,0x3f,0x5c,0x10,0x9c,0xa6,0x26,0x51,0x63,0xca,0x98,0x26},
+ {0x78,0xba,0xb0,0x32,0x88,0x31,0x65,0xe7,0x8b,0xff,0x5c,0x92,0xf7,0x31,0x18,0x38,0xcc,0x1f,0x29,0xa0,0x91,0x1b,0xa8,0x08,0x07,0xeb,0xca,0x49,0xcc,0x3d,0xb4,0x1f,0x0e,0xd9,0x3d,0x5e,0x2f,0x70,0x3d,0x2e,0x86,0x53,0xd2,0xe4,0x18,0x09,0x3f,0x9e,0x6a,0xa9,0x4d,0x02,0xf6,0x3e,0x77,0x5e,0x32,0x33,0xfa,0x4a,0x0c,0x4b,0x00,0x3c,0x2b,0xb8,0xf4,0x06,0xac,0x46,0xa9,0x9a,0xf3,0xc4,0x06,0xa8,0xa5,0x84,0xa2,0x1c,0x87,0x47,0xcd,0xc6,0x5f,0x26,0xd3,0x3e,0x17,0xd2,0x1f,0xcd,0x01,0xfd,0x43,0x6b},
+ {0x44,0xc5,0x97,0x46,0x4b,0x5d,0xa7,0xc7,0xbf,0xff,0x0f,0xdf,0x48,0xf8,0xfd,0x15,0x5a,0x78,0x46,0xaa,0xeb,0xb9,0x68,0x28,0x14,0xf7,0x52,0x5b,0x10,0xd7,0x68,0x5a,0xf3,0x0e,0x76,0x3e,0x58,0x42,0xc7,0xb5,0x90,0xb9,0x0a,0xee,0xb9,0x52,0xdc,0x75,0x3f,0x92,0x2b,0x07,0xc2,0x27,0x14,0xbf,0xf0,0xd9,0xf0,0x6f,0x2d,0x0b,0x42,0x73,0x06,0x1e,0x85,0x9e,0xcb,0xf6,0x2c,0xaf,0xc4,0x38,0x22,0xc6,0x13,0x39,0x59,0x8f,0x73,0xf3,0xfb,0x99,0x96,0xb8,0x8a,0xda,0x9e,0xbc,0x34,0xea,0x2f,0x63,0xb5,0x3d},
+ {0xd8,0xd9,0x5d,0xf7,0x2b,0xee,0x6e,0xf4,0xa5,0x59,0x67,0x39,0xf6,0xb1,0x17,0x0d,0x73,0x72,0x9e,0x49,0x31,0xd1,0xf2,0x1b,0x13,0x5f,0xd7,0x49,0xdf,0x1a,0x32,0x04,0xd5,0x25,0x98,0x82,0xb1,0x90,0x49,0x2e,0x91,0x89,0x9a,0x3e,0x87,0xeb,0xea,0xed,0xf8,0x4a,0x70,0x4c,0x39,0x3d,0xf0,0xee,0x0e,0x2b,0xdf,0x95,0xa4,0x7e,0x19,0x59,0xae,0x5a,0xe5,0xe4,0x19,0x60,0xe1,0x04,0xe9,0x92,0x2f,0x7e,0x7a,0x43,0x7b,0xe7,0xa4,0x9a,0x15,0x6f,0xc1,0x2d,0xce,0xc7,0xc0,0x0c,0xd7,0xf4,0xc1,0xfd,0xea,0x45},
+ {0x2b,0xd7,0x45,0x80,0x85,0x01,0x84,0x69,0x51,0x06,0x2f,0xcf,0xa2,0xfa,0x22,0x4c,0xc6,0x2d,0x22,0x6b,0x65,0x36,0x1a,0x94,0xde,0xda,0x62,0x03,0xc8,0xeb,0x5e,0x5a,0xed,0xb1,0xcc,0xcf,0x24,0x46,0x0e,0xb6,0x95,0x03,0x5c,0xbd,0x92,0xc2,0xdb,0x59,0xc9,0x81,0x04,0xdc,0x1d,0x9d,0xa0,0x31,0x40,0xd9,0x56,0x5d,0xea,0xce,0x73,0x3f,0xc6,0x8d,0x4e,0x0a,0xd1,0xbf,0xa7,0xb7,0x39,0xb3,0xc9,0x44,0x7e,0x00,0x57,0xbe,0xfa,0xae,0x57,0x15,0x7f,0x20,0xc1,0x60,0xdb,0x18,0x62,0x26,0x91,0x88,0x05,0x26},
+ {0x04,0xff,0x60,0x83,0xa6,0x04,0xf7,0x59,0xf4,0xe6,0x61,0x76,0xde,0x3f,0xd9,0xc3,0x51,0x35,0x87,0x12,0x73,0x2a,0x1b,0x83,0x57,0x5d,0x61,0x4e,0x2e,0x0c,0xad,0x54,0x42,0xe5,0x76,0xc6,0x3c,0x8e,0x81,0x4c,0xad,0xcc,0xce,0x03,0x93,0x2c,0x42,0x5e,0x08,0x9f,0x12,0xb4,0xca,0xcc,0x07,0xec,0xb8,0x43,0x44,0xb2,0x10,0xfa,0xed,0x0d,0x2a,0x52,0x2b,0xb8,0xd5,0x67,0x3b,0xee,0xeb,0xc1,0xa5,0x9f,0x46,0x63,0xf1,0x36,0xd3,0x9f,0xc1,0x6e,0xf2,0xd2,0xb4,0xa5,0x08,0x94,0x7a,0xa7,0xba,0xb2,0xec,0x62},
+ {0x3d,0x2b,0x15,0x61,0x52,0x79,0xed,0xe5,0xd1,0xd7,0xdd,0x0e,0x7d,0x35,0x62,0x49,0x71,0x4c,0x6b,0xb9,0xd0,0xc8,0x82,0x74,0xbe,0xd8,0x66,0xa9,0x19,0xf9,0x59,0x2e,0x74,0x28,0xb6,0xaf,0x36,0x28,0x07,0x92,0xa5,0x04,0xe1,0x79,0x85,0x5e,0xcd,0x5f,0x4a,0xa1,0x30,0xc6,0xad,0x01,0xad,0x5a,0x98,0x3f,0x66,0x75,0x50,0x3d,0x91,0x61,0xda,0x31,0x32,0x1a,0x36,0x2d,0xc6,0x0d,0x70,0x02,0x20,0x94,0x32,0x58,0x47,0xfa,0xce,0x94,0x95,0x3f,0x51,0x01,0xd8,0x02,0x5c,0x5d,0xc0,0x31,0xa1,0xc2,0xdb,0x3d},
+ {0x4b,0xc5,0x5e,0xce,0xf9,0x0f,0xdc,0x9a,0x0d,0x13,0x2f,0x8c,0x6b,0x2a,0x9c,0x03,0x15,0x95,0xf8,0xf0,0xc7,0x07,0x80,0x02,0x6b,0xb3,0x04,0xac,0x14,0x83,0x96,0x78,0x14,0xbb,0x96,0x27,0xa2,0x57,0xaa,0xf3,0x21,0xda,0x07,0x9b,0xb7,0xba,0x3a,0x88,0x1c,0x39,0xa0,0x31,0x18,0xe2,0x4b,0xe5,0xf9,0x05,0x32,0xd8,0x38,0xfb,0xe7,0x5e,0x8e,0x6a,0x44,0x41,0xcb,0xfd,0x8d,0x53,0xf9,0x37,0x49,0x43,0xa9,0xfd,0xac,0xa5,0x78,0x8c,0x3c,0x26,0x8d,0x90,0xaf,0x46,0x09,0x0d,0xca,0x9b,0x3c,0x63,0xd0,0x61},
+ {0x66,0x25,0xdb,0xff,0x35,0x49,0x74,0x63,0xbb,0x68,0x0b,0x78,0x89,0x6b,0xbd,0xc5,0x03,0xec,0x3e,0x55,0x80,0x32,0x1b,0x6f,0xf5,0xd7,0xae,0x47,0xd8,0x5f,0x96,0x6e,0xdf,0x73,0xfc,0xf8,0xbc,0x28,0xa3,0xad,0xfc,0x37,0xf0,0xa6,0x5d,0x69,0x84,0xee,0x09,0xa9,0xc2,0x38,0xdb,0xb4,0x7f,0x63,0xdc,0x7b,0x06,0xf8,0x2d,0xac,0x23,0x5b,0x7b,0x52,0x80,0xee,0x53,0xb9,0xd2,0x9a,0x8d,0x6d,0xde,0xfa,0xaa,0x19,0x8f,0xe8,0xcf,0x82,0x0e,0x15,0x04,0x17,0x71,0x0e,0xdc,0xde,0x95,0xdd,0xb9,0xbb,0xb9,0x79},
+ {0xc2,0x26,0x31,0x6a,0x40,0x55,0xb3,0xeb,0x93,0xc3,0xc8,0x68,0xa8,0x83,0x63,0xd2,0x82,0x7a,0xb9,0xe5,0x29,0x64,0x0c,0x6c,0x47,0x21,0xfd,0xc9,0x58,0xf1,0x65,0x50,0x74,0x73,0x9f,0x8e,0xae,0x7d,0x99,0xd1,0x16,0x08,0xbb,0xcf,0xf8,0xa2,0x32,0xa0,0x0a,0x5f,0x44,0x6d,0x12,0xba,0x6c,0xcd,0x34,0xb8,0xcc,0x0a,0x46,0x11,0xa8,0x1b,0x54,0x99,0x42,0x0c,0xfb,0x69,0x81,0x70,0x67,0xcf,0x6e,0xd7,0xac,0x00,0x46,0xe1,0xba,0x45,0xe6,0x70,0x8a,0xb9,0xaa,0x2e,0xf2,0xfa,0xa4,0x58,0x9e,0xf3,0x81,0x39},
+ {0x93,0x0a,0x23,0x59,0x75,0x8a,0xfb,0x18,0x5d,0xf4,0xe6,0x60,0x69,0x8f,0x16,0x1d,0xb5,0x3c,0xa9,0x14,0x45,0xa9,0x85,0x3a,0xfd,0xd0,0xac,0x05,0x37,0x08,0xdc,0x38,0xde,0x6f,0xe6,0x6d,0xa5,0xdf,0x45,0xc8,0x3a,0x48,0x40,0x2c,0x00,0xa5,0x52,0xe1,0x32,0xf6,0xb4,0xc7,0x63,0xe1,0xd2,0xe9,0x65,0x1b,0xbc,0xdc,0x2e,0x45,0xf4,0x30,0x40,0x97,0x75,0xc5,0x82,0x27,0x6d,0x85,0xcc,0xbe,0x9c,0xf9,0x69,0x45,0x13,0xfa,0x71,0x4e,0xea,0xc0,0x73,0xfc,0x44,0x88,0x69,0x24,0x3f,0x59,0x1a,0x9a,0x2d,0x63},
+ {0xa6,0xcb,0x07,0xb8,0x15,0x6b,0xbb,0xf6,0xd7,0xf0,0x54,0xbc,0xdf,0xc7,0x23,0x18,0x0b,0x67,0x29,0x6e,0x03,0x97,0x1d,0xbb,0x57,0x4a,0xed,0x47,0x88,0xf4,0x24,0x0b,0xa7,0x84,0x0c,0xed,0x11,0xfd,0x09,0xbf,0x3a,0x69,0x9f,0x0d,0x81,0x71,0xf0,0x63,0x79,0x87,0xcf,0x57,0x2d,0x8c,0x90,0x21,0xa2,0x4b,0xf6,0x8a,0xf2,0x7d,0x5a,0x3a,0xc7,0xea,0x1b,0x51,0xbe,0xd4,0xda,0xdc,0xf2,0xcc,0x26,0xed,0x75,0x80,0x53,0xa4,0x65,0x9a,0x5f,0x00,0x9f,0xff,0x9c,0xe1,0x63,0x1f,0x48,0x75,0x44,0xf7,0xfc,0x34},
+ {0xca,0x67,0x97,0x78,0x4c,0xe0,0x97,0xc1,0x7d,0x46,0xd9,0x38,0xcb,0x4d,0x71,0xb8,0xa8,0x5f,0xf9,0x83,0x82,0x88,0xde,0x55,0xf7,0x63,0xfa,0x4d,0x16,0xdc,0x3b,0x3d,0x98,0xaa,0xcf,0x78,0xab,0x1d,0xbb,0xa5,0xf2,0x72,0x0b,0x19,0x67,0xa2,0xed,0x5c,0x8e,0x60,0x92,0x0a,0x11,0xc9,0x09,0x93,0xb0,0x74,0xb3,0x2f,0x04,0xa3,0x19,0x01,0x7d,0x17,0xc2,0xe8,0x9c,0xd8,0xa2,0x67,0xc1,0xd0,0x95,0x68,0xf6,0xa5,0x9d,0x66,0xb0,0xa2,0x82,0xb2,0xe5,0x98,0x65,0xf5,0x73,0x0a,0xe2,0xed,0xf1,0x88,0xc0,0x56},
+ {0x17,0x6e,0xa8,0x10,0x11,0x3d,0x6d,0x33,0xfa,0xb2,0x75,0x0b,0x32,0x88,0xf3,0xd7,0x88,0x29,0x07,0x25,0x76,0x33,0x15,0xf9,0x87,0x8b,0x10,0x99,0x6b,0x4c,0x67,0x09,0x02,0x8f,0xf3,0x24,0xac,0x5f,0x1b,0x58,0xbd,0x0c,0xe3,0xba,0xfe,0xe9,0x0b,0xa9,0xf0,0x92,0xcf,0x8a,0x02,0x69,0x21,0x9a,0x8f,0x03,0x59,0x83,0xa4,0x7e,0x8b,0x03,0xf8,0x6f,0x31,0x99,0x21,0xf8,0x4e,0x9f,0x4f,0x8d,0xa7,0xea,0x82,0xd2,0x49,0x2f,0x74,0x31,0xef,0x5a,0xab,0xa5,0x71,0x09,0x65,0xeb,0x69,0x59,0x02,0x31,0x5e,0x6e},
+ {0xfb,0x93,0xe5,0x87,0xf5,0x62,0x6c,0xb1,0x71,0x3e,0x5d,0xca,0xde,0xed,0x99,0x49,0x6d,0x3e,0xcc,0x14,0xe0,0xc1,0x91,0xb4,0xa8,0xdb,0xa8,0x89,0x47,0x11,0xf5,0x08,0x22,0x62,0x06,0x63,0x0e,0xfb,0x04,0x33,0x3f,0xba,0xac,0x87,0x89,0x06,0x35,0xfb,0xa3,0x61,0x10,0x8c,0x77,0x24,0x19,0xbd,0x20,0x86,0x83,0xd1,0x43,0xad,0x58,0x30,0xd0,0x63,0x76,0xe5,0xfd,0x0f,0x3c,0x32,0x10,0xa6,0x2e,0xa2,0x38,0xdf,0xc3,0x05,0x9a,0x4f,0x99,0xac,0xbd,0x8a,0xc7,0xbd,0x99,0xdc,0xe3,0xef,0xa4,0x9f,0x54,0x26},
+ {0xd6,0xf9,0x6b,0x1e,0x46,0x5a,0x1d,0x74,0x81,0xa5,0x77,0x77,0xfc,0xb3,0x05,0x23,0xd9,0xd3,0x74,0x64,0xa2,0x74,0x55,0xd4,0xff,0xe0,0x01,0x64,0xdc,0xe1,0x26,0x19,0x6e,0x66,0x3f,0xaf,0x49,0x85,0x46,0xdb,0xa5,0x0e,0x4a,0xf1,0x04,0xcf,0x7f,0xd7,0x47,0x0c,0xba,0xa4,0xf7,0x3f,0xf2,0x3d,0x85,0x3c,0xce,0x32,0xe1,0xdf,0x10,0x3a,0xa0,0xce,0x17,0xea,0x8a,0x4e,0x7f,0xe0,0xfd,0xc1,0x1f,0x3a,0x46,0x15,0xd5,0x2f,0xf1,0xc0,0xf2,0x31,0xfd,0x22,0x53,0x17,0x15,0x5d,0x1e,0x86,0x1d,0xd0,0xa1,0x1f},
+ {0x32,0x98,0x59,0x7d,0x94,0x55,0x80,0xcc,0x20,0x55,0xf1,0x37,0xda,0x56,0x46,0x1e,0x20,0x93,0x05,0x4e,0x74,0xf7,0xf6,0x99,0x33,0xcf,0x75,0x6a,0xbc,0x63,0x35,0x77,0xab,0x94,0xdf,0xd1,0x00,0xac,0xdc,0x38,0xe9,0x0d,0x08,0xd1,0xdd,0x2b,0x71,0x2e,0x62,0xe2,0xd5,0xfd,0x3e,0xe9,0x13,0x7f,0xe5,0x01,0x9a,0xee,0x18,0xed,0xfc,0x73,0xb3,0x9c,0x13,0x63,0x08,0xe9,0xb1,0x06,0xcd,0x3e,0xa0,0xc5,0x67,0xda,0x93,0xa4,0x32,0x89,0x63,0xad,0xc8,0xce,0x77,0x8d,0x44,0x4f,0x86,0x1b,0x70,0x6b,0x42,0x1f},
+ {0x01,0x1c,0x91,0x41,0x4c,0x26,0xc9,0xef,0x25,0x2c,0xa2,0x17,0xb8,0xb7,0xa3,0xf1,0x47,0x14,0x0f,0xf3,0x6b,0xda,0x75,0x58,0x90,0xb0,0x31,0x1d,0x27,0xf5,0x1a,0x4e,0x52,0x25,0xa1,0x91,0xc8,0x35,0x7e,0xf1,0x76,0x9c,0x5e,0x57,0x53,0x81,0x6b,0xb7,0x3e,0x72,0x9b,0x0d,0x6f,0x40,0x83,0xfa,0x38,0xe4,0xa7,0x3f,0x1b,0xbb,0x76,0x0b,0x9b,0x93,0x92,0x7f,0xf9,0xc1,0xb8,0x08,0x6e,0xab,0x44,0xd4,0xcb,0x71,0x67,0xbe,0x17,0x80,0xbb,0x99,0x63,0x64,0xe5,0x22,0x55,0xa9,0x72,0xb7,0x1e,0xd6,0x6d,0x7b},
+ {0x92,0x3d,0xf3,0x50,0xe8,0xc1,0xad,0xb7,0xcf,0xd5,0x8c,0x60,0x4f,0xfa,0x98,0x79,0xdb,0x5b,0xfc,0x8d,0xbd,0x2d,0x96,0xad,0x4f,0x2f,0x1d,0xaf,0xce,0x9b,0x3e,0x70,0xc7,0xd2,0x01,0xab,0xf9,0xab,0x30,0x57,0x18,0x3b,0x14,0x40,0xdc,0x76,0xfb,0x16,0x81,0xb2,0xcb,0xa0,0x65,0xbe,0x6c,0x86,0xfe,0x6a,0xff,0x9b,0x65,0x9b,0xfa,0x53,0x55,0x54,0x88,0x94,0xe9,0xc8,0x14,0x6c,0xe5,0xd4,0xae,0x65,0x66,0x5d,0x3a,0x84,0xf1,0x5a,0xd6,0xbc,0x3e,0xb7,0x1b,0x18,0x50,0x1f,0xc6,0xc4,0xe5,0x93,0x8d,0x39},
+ {0xf3,0x48,0xe2,0x33,0x67,0xd1,0x4b,0x1c,0x5f,0x0a,0xbf,0x15,0x87,0x12,0x9e,0xbd,0x76,0x03,0x0b,0xa1,0xf0,0x8c,0x3f,0xd4,0x13,0x1b,0x19,0xdf,0x5d,0x9b,0xb0,0x53,0xf2,0xe3,0xe7,0xd2,0x60,0x7c,0x87,0xc3,0xb1,0x8b,0x82,0x30,0xa0,0xaa,0x34,0x3b,0x38,0xf1,0x9e,0x73,0xe7,0x26,0x3e,0x28,0x77,0x05,0xc3,0x02,0x90,0x9c,0x9c,0x69,0xcc,0xf1,0x46,0x59,0x23,0xa7,0x06,0xf3,0x7d,0xd9,0xe5,0xcc,0xb5,0x18,0x17,0x92,0x75,0xe9,0xb4,0x81,0x47,0xd2,0xcd,0x28,0x07,0xd9,0xcd,0x6f,0x0c,0xf3,0xca,0x51},
+ {0x0a,0xe0,0x74,0x76,0x42,0xa7,0x0b,0xa6,0xf3,0x7b,0x7a,0xa1,0x70,0x85,0x0e,0x63,0xcc,0x24,0x33,0xcf,0x3d,0x56,0x58,0x37,0xaa,0xfd,0x83,0x23,0x29,0xaa,0x04,0x55,0xc7,0x54,0xac,0x18,0x9a,0xf9,0x7a,0x73,0x0f,0xb3,0x1c,0xc5,0xdc,0x78,0x33,0x90,0xc7,0x0c,0xe1,0x4c,0x33,0xbc,0x89,0x2b,0x9a,0xe9,0xf8,0x89,0xc1,0x29,0xae,0x12,0xcf,0x01,0x0d,0x1f,0xcb,0xc0,0x9e,0xa9,0xae,0xf7,0x34,0x3a,0xcc,0xef,0xd1,0x0d,0x22,0x4e,0x9c,0xd0,0x21,0x75,0xca,0x55,0xea,0xa5,0xeb,0x58,0xe9,0x4f,0xd1,0x5f},
+ {0x2c,0xab,0x45,0x28,0xdf,0x2d,0xdc,0xb5,0x93,0xe9,0x7f,0x0a,0xb1,0x91,0x94,0x06,0x46,0xe3,0x02,0x40,0xd6,0xf3,0xaa,0x4d,0xd1,0x74,0x64,0x58,0x6e,0xf2,0x3f,0x09,0x8e,0xcb,0x93,0xbf,0x5e,0xfe,0x42,0x3c,0x5f,0x56,0xd4,0x36,0x51,0xa8,0xdf,0xbe,0xe8,0x20,0x42,0x88,0x9e,0x85,0xf0,0xe0,0x28,0xd1,0x25,0x07,0x96,0x3f,0xd7,0x7d,0x29,0x98,0x05,0x68,0xfe,0x24,0x0d,0xb1,0xe5,0x23,0xaf,0xdb,0x72,0x06,0x73,0x75,0x29,0xac,0x57,0xb4,0x3a,0x25,0x67,0x13,0xa4,0x70,0xb4,0x86,0xbc,0xbc,0x59,0x2f},
+ {0x5f,0x13,0x17,0x99,0x42,0x7d,0x84,0x83,0xd7,0x03,0x7d,0x56,0x1f,0x91,0x1b,0xad,0xd1,0xaa,0x77,0xbe,0xd9,0x48,0x77,0x7e,0x4a,0xaf,0x51,0x2e,0x2e,0xb4,0x58,0x54,0x01,0xc3,0x91,0xb6,0x60,0xd5,0x41,0x70,0x1e,0xe7,0xd7,0xad,0x3f,0x1b,0x20,0x85,0x85,0x55,0x33,0x11,0x63,0xe1,0xc2,0x16,0xb1,0x28,0x08,0x01,0x3d,0x5e,0xa5,0x2a,0x4f,0x44,0x07,0x0c,0xe6,0x92,0x51,0xed,0x10,0x1d,0x42,0x74,0x2d,0x4e,0xc5,0x42,0x64,0xc8,0xb5,0xfd,0x82,0x4c,0x2b,0x35,0x64,0x86,0x76,0x8a,0x4a,0x00,0xe9,0x13},
+ {0xdb,0xce,0x2f,0x83,0x45,0x88,0x9d,0x73,0x63,0xf8,0x6b,0xae,0xc9,0xd6,0x38,0xfa,0xf7,0xfe,0x4f,0xb7,0xca,0x0d,0xbc,0x32,0x5e,0xe4,0xbc,0x14,0x88,0x7e,0x93,0x73,0x7f,0x87,0x3b,0x19,0xc9,0x00,0x2e,0xbb,0x6b,0x50,0xdc,0xe0,0x90,0xa8,0xe3,0xec,0x9f,0x64,0xde,0x36,0xc0,0xb7,0xf3,0xec,0x1a,0x9e,0xde,0x98,0x08,0x04,0x46,0x5f,0x8d,0xf4,0x7b,0x29,0x16,0x71,0x03,0xb9,0x34,0x68,0xf0,0xd4,0x22,0x3b,0xd1,0xa9,0xc6,0xbd,0x96,0x46,0x57,0x15,0x97,0xe1,0x35,0xe8,0xd5,0x91,0xe8,0xa4,0xf8,0x2c},
+ {0x67,0x0f,0x11,0x07,0x87,0xfd,0x93,0x6d,0x49,0xb5,0x38,0x7c,0xd3,0x09,0x4c,0xdd,0x86,0x6a,0x73,0xc2,0x4c,0x6a,0xb1,0x7c,0x09,0x2a,0x25,0x58,0x6e,0xbd,0x49,0x20,0xa2,0x6b,0xd0,0x17,0x7e,0x48,0xb5,0x2c,0x6b,0x19,0x50,0x39,0x1c,0x38,0xd2,0x24,0x30,0x8a,0x97,0x85,0x81,0x9c,0x65,0xd7,0xf6,0xa4,0xd6,0x91,0x28,0x7f,0x6f,0x7a,0x49,0xef,0x9a,0x6a,0x8d,0xfd,0x09,0x7d,0x0b,0xb9,0x3d,0x5b,0xbe,0x60,0xee,0xf0,0xd4,0xbf,0x9e,0x51,0x2c,0xb5,0x21,0x4c,0x1d,0x94,0x45,0xc5,0xdf,0xaa,0x11,0x60},
+ {0x3c,0xf8,0x95,0xcf,0x6d,0x92,0x67,0x5f,0x71,0x90,0x28,0x71,0x61,0x85,0x7e,0x7c,0x5b,0x7a,0x8f,0x99,0xf3,0xe7,0xa1,0xd6,0xe0,0xf9,0x62,0x0b,0x1b,0xcc,0xc5,0x6f,0x90,0xf8,0xcb,0x02,0xc8,0xd0,0xde,0x63,0xaa,0x6a,0xff,0x0d,0xca,0x98,0xd0,0xfb,0x99,0xed,0xb6,0xb9,0xfd,0x0a,0x4d,0x62,0x1e,0x0b,0x34,0x79,0xb7,0x18,0xce,0x69,0xcb,0x79,0x98,0xb2,0x28,0x55,0xef,0xd1,0x92,0x90,0x7e,0xd4,0x3c,0xae,0x1a,0xdd,0x52,0x23,0x9f,0x18,0x42,0x04,0x7e,0x12,0xf1,0x01,0x71,0xe5,0x3a,0x6b,0x59,0x15},
+ {0xa2,0x79,0x91,0x3f,0xd2,0x39,0x27,0x46,0xcf,0xdd,0xd6,0x97,0x31,0x12,0x83,0xff,0x8a,0x14,0xf2,0x53,0xb5,0xde,0x07,0x13,0xda,0x4d,0x5f,0x7b,0x68,0x37,0x22,0x0d,0xca,0x24,0x51,0x7e,0x16,0x31,0xff,0x09,0xdf,0x45,0xc7,0xd9,0x8b,0x15,0xe4,0x0b,0xe5,0x56,0xf5,0x7e,0x22,0x7d,0x2b,0x29,0x38,0xd1,0xb6,0xaf,0x41,0xe2,0xa4,0x3a,0xf5,0x05,0x33,0x2a,0xbf,0x38,0xc1,0x2c,0xc3,0x26,0xe9,0xa2,0x8f,0x3f,0x58,0x48,0xeb,0xd2,0x49,0x55,0xa2,0xb1,0x3a,0x08,0x6c,0xa3,0x87,0x46,0x6e,0xaa,0xfc,0x32},
+ {0xf5,0x9a,0x7d,0xc5,0x8d,0x6e,0xc5,0x7b,0xf2,0xbd,0xf0,0x9d,0xed,0xd2,0x0b,0x3e,0xa3,0xe4,0xef,0x22,0xde,0x14,0xc0,0xaa,0x5c,0x6a,0xbd,0xfe,0xce,0xe9,0x27,0x46,0xdf,0xcc,0x87,0x27,0x73,0xa4,0x07,0x32,0xf8,0xe3,0x13,0xf2,0x08,0x19,0xe3,0x17,0x4e,0x96,0x0d,0xf6,0xd7,0xec,0xb2,0xd5,0xe9,0x0b,0x60,0xc2,0x36,0x63,0x6f,0x74,0x1c,0x97,0x6c,0xab,0x45,0xf3,0x4a,0x3f,0x1f,0x73,0x43,0x99,0x72,0xeb,0x88,0xe2,0x6d,0x18,0x44,0x03,0x8a,0x6a,0x59,0x33,0x93,0x62,0xd6,0x7e,0x00,0x17,0x49,0x7b},
+ {0x64,0xb0,0x84,0xab,0x5c,0xfb,0x85,0x2d,0x14,0xbc,0xf3,0x89,0xd2,0x10,0x78,0x49,0x0c,0xce,0x15,0x7b,0x44,0xdc,0x6a,0x47,0x7b,0xfd,0x44,0xf8,0x76,0xa3,0x2b,0x12,0xdd,0xa2,0x53,0xdd,0x28,0x1b,0x34,0x54,0x3f,0xfc,0x42,0xdf,0x5b,0x90,0x17,0xaa,0xf4,0xf8,0xd2,0x4d,0xd9,0x92,0xf5,0x0f,0x7d,0xd3,0x8c,0xe0,0x0f,0x62,0x03,0x1d,0x54,0xe5,0xb4,0xa2,0xcd,0x32,0x02,0xc2,0x7f,0x18,0x5d,0x11,0x42,0xfd,0xd0,0x9e,0xd9,0x79,0xd4,0x7d,0xbe,0xb4,0xab,0x2e,0x4c,0xec,0x68,0x2b,0xf5,0x0b,0xc7,0x02},
+ {0xbb,0x2f,0x0b,0x5d,0x4b,0xec,0x87,0xa2,0xca,0x82,0x48,0x07,0x90,0x57,0x5c,0x41,0x5c,0x81,0xd0,0xc1,0x1e,0xa6,0x44,0xe0,0xe0,0xf5,0x9e,0x40,0x0a,0x4f,0x33,0x26,0xe1,0x72,0x8d,0x45,0xbf,0x32,0xe5,0xac,0xb5,0x3c,0xb7,0x7c,0xe0,0x68,0xe7,0x5b,0xe7,0xbd,0x8b,0xee,0x94,0x7d,0xcf,0x56,0x03,0x3a,0xb4,0xfe,0xe3,0x97,0x06,0x6b,0xc0,0xa3,0x62,0xdf,0x4a,0xf0,0xc8,0xb6,0x5d,0xa4,0x6d,0x07,0xef,0x00,0xf0,0x3e,0xa9,0xd2,0xf0,0x49,0x58,0xb9,0x9c,0x9c,0xae,0x2f,0x1b,0x44,0x43,0x7f,0xc3,0x1c},
+ {0x4f,0x32,0xc7,0x5c,0x5a,0x56,0x8f,0x50,0x22,0xa9,0x06,0xe5,0xc0,0xc4,0x61,0xd0,0x19,0xac,0x45,0x5c,0xdb,0xab,0x18,0xfb,0x4a,0x31,0x80,0x03,0xc1,0x09,0x68,0x6c,0xb9,0xae,0xce,0xc9,0xf1,0x56,0x66,0xd7,0x6a,0x65,0xe5,0x18,0xf8,0x15,0x5b,0x1c,0x34,0x23,0x4c,0x84,0x32,0x28,0xe7,0x26,0x38,0x68,0x19,0x2f,0x77,0x6f,0x34,0x3a,0xc8,0x6a,0xda,0xe2,0x12,0x51,0xd5,0xd2,0xed,0x51,0xe8,0xb1,0x31,0x03,0xbd,0xe9,0x62,0x72,0xc6,0x8e,0xdd,0x46,0x07,0x96,0xd0,0xc5,0xf7,0x6e,0x9f,0x1b,0x91,0x05},
+ {0xbb,0x0e,0xdf,0xf5,0x83,0x99,0x33,0xc1,0xac,0x4c,0x2c,0x51,0x8f,0x75,0xf3,0xc0,0xe1,0x98,0xb3,0x0b,0x0a,0x13,0xf1,0x2c,0x62,0x0c,0x27,0xaa,0xf9,0xec,0x3c,0x6b,0xef,0xea,0x2e,0x51,0xf3,0xac,0x49,0x53,0x49,0xcb,0xc1,0x1c,0xd3,0x41,0xc1,0x20,0x8d,0x68,0x9a,0xa9,0x07,0x0c,0x18,0x24,0x17,0x2d,0x4b,0xc6,0xd1,0xf9,0x5e,0x55,0x08,0xbd,0x73,0x3b,0xba,0x70,0xa7,0x36,0x0c,0xbf,0xaf,0xa3,0x08,0xef,0x4a,0x62,0xf2,0x46,0x09,0xb4,0x98,0xff,0x37,0x57,0x9d,0x74,0x81,0x33,0xe1,0x4d,0x5f,0x67},
+ {0xfc,0x82,0x17,0x6b,0x03,0x52,0x2c,0x0e,0xb4,0x83,0xad,0x6c,0x81,0x6c,0x81,0x64,0x3e,0x07,0x64,0x69,0xd9,0xbd,0xdc,0xd0,0x20,0xc5,0x64,0x01,0xf7,0x9d,0xd9,0x13,0x1d,0xb3,0xda,0x3b,0xd9,0xf6,0x2f,0xa1,0xfe,0x2d,0x65,0x9d,0x0f,0xd8,0x25,0x07,0x87,0x94,0xbe,0x9a,0xf3,0x4f,0x9c,0x01,0x43,0x3c,0xcd,0x82,0xb8,0x50,0xf4,0x60,0xca,0xc0,0xe5,0x21,0xc3,0x5e,0x4b,0x01,0xa2,0xbf,0x19,0xd7,0xc9,0x69,0xcb,0x4f,0xa0,0x23,0x00,0x75,0x18,0x1c,0x5f,0x4e,0x80,0xac,0xed,0x55,0x9e,0xde,0x06,0x1c},
+ {0xe2,0xc4,0x3e,0xa3,0xd6,0x7a,0x0f,0x99,0x8e,0xe0,0x2e,0xbe,0x38,0xf9,0x08,0x66,0x15,0x45,0x28,0x63,0xc5,0x43,0xa1,0x9c,0x0d,0xb6,0x2d,0xec,0x1f,0x8a,0xf3,0x4c,0xaa,0x69,0x6d,0xff,0x40,0x2b,0xd5,0xff,0xbb,0x49,0x40,0xdc,0x18,0x0b,0x53,0x34,0x97,0x98,0x4d,0xa3,0x2f,0x5c,0x4a,0x5e,0x2d,0xba,0x32,0x7d,0x8e,0x6f,0x09,0x78,0xe7,0x5c,0xfa,0x0d,0x65,0xaa,0xaa,0xa0,0x8c,0x47,0xb5,0x48,0x2a,0x9e,0xc4,0xf9,0x5b,0x72,0x03,0x70,0x7d,0xcc,0x09,0x4f,0xbe,0x1a,0x09,0x26,0x3a,0xad,0x3c,0x37},
+ {0x7c,0xf5,0xc9,0x82,0x4d,0x63,0x94,0xb2,0x36,0x45,0x93,0x24,0xe1,0xfd,0xcb,0x1f,0x5a,0xdb,0x8c,0x41,0xb3,0x4d,0x9c,0x9e,0xfc,0x19,0x44,0x45,0xd9,0xf3,0x40,0x00,0xad,0xbb,0xdd,0x89,0xfb,0xa8,0xbe,0xf1,0xcb,0xae,0xae,0x61,0xbc,0x2c,0xcb,0x3b,0x9d,0x8d,0x9b,0x1f,0xbb,0xa7,0x58,0x8f,0x86,0xa6,0x12,0x51,0xda,0x7e,0x54,0x21,0xd3,0x86,0x59,0xfd,0x39,0xe9,0xfd,0xde,0x0c,0x38,0x0a,0x51,0x89,0x2c,0x27,0xf4,0xb9,0x19,0x31,0xbb,0x07,0xa4,0x2b,0xb7,0xf4,0x4d,0x25,0x4a,0x33,0x0a,0x55,0x63},
+ {0x37,0xcf,0x69,0xb5,0xed,0xd6,0x07,0x65,0xe1,0x2e,0xa5,0x0c,0xb0,0x29,0x84,0x17,0x5d,0xd6,0x6b,0xeb,0x90,0x00,0x7c,0xea,0x51,0x8f,0xf7,0xda,0xc7,0x62,0xea,0x3e,0x49,0x7b,0x54,0x72,0x45,0x58,0xba,0x9b,0xe0,0x08,0xc4,0xe2,0xfa,0xc6,0x05,0xf3,0x8d,0xf1,0x34,0xc7,0x69,0xfa,0xe8,0x60,0x7a,0x76,0x7d,0xaa,0xaf,0x2b,0xa9,0x39,0x4e,0x27,0x93,0xe6,0x13,0xc7,0x24,0x9d,0x75,0xd3,0xdb,0x68,0x77,0x85,0x63,0x5f,0x9a,0xb3,0x8a,0xeb,0x60,0x55,0x52,0x70,0xcd,0xc4,0xc9,0x65,0x06,0x6a,0x43,0x68},
+ {0x27,0x3f,0x2f,0x20,0xe8,0x35,0x02,0xbc,0xb0,0x75,0xf9,0x64,0xe2,0x00,0x5c,0xc7,0x16,0x24,0x8c,0xa3,0xd5,0xe9,0xa4,0x91,0xf9,0x89,0xb7,0x8a,0xf6,0xe7,0xb6,0x17,0x7c,0x10,0x20,0xe8,0x17,0xd3,0x56,0x1e,0x65,0xe9,0x0a,0x84,0x44,0x68,0x26,0xc5,0x7a,0xfc,0x0f,0x32,0xc6,0xa1,0xe0,0xc1,0x72,0x14,0x61,0x91,0x9c,0x66,0x73,0x53,0x57,0x52,0x0e,0x9a,0xab,0x14,0x28,0x5d,0xfc,0xb3,0xca,0xc9,0x84,0x20,0x8f,0x90,0xca,0x1e,0x2d,0x5b,0x88,0xf5,0xca,0xaf,0x11,0x7d,0xf8,0x78,0xa6,0xb5,0xb4,0x1c},
+ {0x6c,0xfc,0x4a,0x39,0x6b,0xc0,0x64,0xb6,0xb1,0x5f,0xda,0x98,0x24,0xde,0x88,0x0c,0x34,0xd8,0xca,0x4b,0x16,0x03,0x8d,0x4f,0xa2,0x34,0x74,0xde,0x78,0xca,0x0b,0x33,0xe7,0x07,0xa0,0xa2,0x62,0xaa,0x74,0x6b,0xb1,0xc7,0x71,0xf0,0xb0,0xe0,0x11,0xf3,0x23,0xe2,0x0b,0x00,0x38,0xe4,0x07,0x57,0xac,0x6e,0xef,0x82,0x2d,0xfd,0xc0,0x2d,0x4e,0x74,0x19,0x11,0x84,0xff,0x2e,0x98,0x24,0x47,0x07,0x2b,0x96,0x5e,0x69,0xf9,0xfb,0x53,0xc9,0xbf,0x4f,0xc1,0x8a,0xc5,0xf5,0x1c,0x9f,0x36,0x1b,0xbe,0x31,0x3c},
+ {0xee,0x8a,0x94,0x08,0x4d,0x86,0xf4,0xb0,0x6f,0x1c,0xba,0x91,0xee,0x19,0xdc,0x07,0x58,0xa1,0xac,0xa6,0xae,0xcd,0x75,0x79,0xbb,0xd4,0x62,0x42,0x13,0x61,0x0b,0x33,0x72,0x42,0xcb,0xf9,0x93,0xbc,0x68,0xc1,0x98,0xdb,0xce,0xc7,0x1f,0x71,0xb8,0xae,0x7a,0x8d,0xac,0x34,0xaa,0x52,0x0e,0x7f,0xbb,0x55,0x7d,0x7e,0x09,0xc1,0xce,0x41,0x8a,0x80,0x6d,0xa2,0xd7,0x19,0x96,0xf7,0x6d,0x15,0x9e,0x1d,0x9e,0xd4,0x1f,0xbb,0x27,0xdf,0xa1,0xdb,0x6c,0xc3,0xd7,0x73,0x7d,0x77,0x28,0x1f,0xd9,0x4c,0xb4,0x26},
+ {0x75,0x74,0x38,0x8f,0x47,0x48,0xf0,0x51,0x3c,0xcb,0xbe,0x9c,0xf4,0xbc,0x5d,0xb2,0x55,0x20,0x9f,0xd9,0x44,0x12,0xab,0x9a,0xd6,0xa5,0x10,0x1c,0x6c,0x9e,0x70,0x2c,0x83,0x03,0x73,0x62,0x93,0xf2,0xb7,0xe1,0x2c,0x8a,0xca,0xeb,0xff,0x79,0x52,0x4b,0x14,0x13,0xd4,0xbf,0x8a,0x77,0xfc,0xda,0x0f,0x61,0x72,0x9c,0x14,0x10,0xeb,0x7d,0x7a,0xee,0x66,0x87,0x6a,0xaf,0x62,0xcb,0x0e,0xcd,0x53,0x55,0x04,0xec,0xcb,0x66,0xb5,0xe4,0x0b,0x0f,0x38,0x01,0x80,0x58,0xea,0xe2,0x2c,0xf6,0x9f,0x8e,0xe6,0x08},
+ {0xad,0x30,0xc1,0x4b,0x0a,0x50,0xad,0x34,0x9c,0xd4,0x0b,0x3d,0x49,0xdb,0x38,0x8d,0xbe,0x89,0x0a,0x50,0x98,0x3d,0x5c,0xa2,0x09,0x3b,0xba,0xee,0x87,0x3f,0x1f,0x2f,0xf9,0xf2,0xb8,0x0a,0xd5,0x09,0x2d,0x2f,0xdf,0x23,0x59,0xc5,0x8d,0x21,0xb9,0xac,0xb9,0x6c,0x76,0x73,0x26,0x34,0x8f,0x4a,0xf5,0x19,0xf7,0x38,0xd7,0x3b,0xb1,0x4c,0x4a,0xb6,0x15,0xe5,0x75,0x8c,0x84,0xf7,0x38,0x90,0x4a,0xdb,0xba,0x01,0x95,0xa5,0x50,0x1b,0x75,0x3f,0x3f,0x31,0x0d,0xc2,0xe8,0x2e,0xae,0xc0,0x53,0xe3,0xa1,0x19},
+ {0xc3,0x05,0xfa,0xba,0x60,0x75,0x1c,0x7d,0x61,0x5e,0xe5,0xc6,0xa0,0xa0,0xe1,0xb3,0x73,0x64,0xd6,0xc0,0x18,0x97,0x52,0xe3,0x86,0x34,0x0c,0xc2,0x11,0x6b,0x54,0x41,0xbd,0xbd,0x96,0xd5,0xcd,0x72,0x21,0xb4,0x40,0xfc,0xee,0x98,0x43,0x45,0xe0,0x93,0xb5,0x09,0x41,0xb4,0x47,0x53,0xb1,0x9f,0x34,0xae,0x66,0x02,0x99,0xd3,0x6b,0x73,0xb4,0xb3,0x34,0x93,0x50,0x2d,0x53,0x85,0x73,0x65,0x81,0x60,0x4b,0x11,0xfd,0x46,0x75,0x83,0x5c,0x42,0x30,0x5f,0x5f,0xcc,0x5c,0xab,0x7f,0xb8,0xa2,0x95,0x22,0x41},
+ {0xe9,0xd6,0x7e,0xf5,0x88,0x9b,0xc9,0x19,0x25,0xc8,0xf8,0x6d,0x26,0xcb,0x93,0x53,0x73,0xd2,0x0a,0xb3,0x13,0x32,0xee,0x5c,0x34,0x2e,0x2d,0xb5,0xeb,0x53,0xe1,0x14,0xc6,0xea,0x93,0xe2,0x61,0x52,0x65,0x2e,0xdb,0xac,0x33,0x21,0x03,0x92,0x5a,0x84,0x6b,0x99,0x00,0x79,0xcb,0x75,0x09,0x46,0x80,0xdd,0x5a,0x19,0x8d,0xbb,0x60,0x07,0x8a,0x81,0xe6,0xcd,0x17,0x1a,0x3e,0x41,0x84,0xa0,0x69,0xed,0xa9,0x6d,0x15,0x57,0xb1,0xcc,0xca,0x46,0x8f,0x26,0xbf,0x2c,0xf2,0xc5,0x3a,0xc3,0x9b,0xbe,0x34,0x6b},
+ {0xb2,0xc0,0x78,0x3a,0x64,0x2f,0xdf,0xf3,0x7c,0x02,0x2e,0xf2,0x1e,0x97,0x3e,0x4c,0xa3,0xb5,0xc1,0x49,0x5e,0x1c,0x7d,0xec,0x2d,0xdd,0x22,0x09,0x8f,0xc1,0x12,0x20,0xd3,0xf2,0x71,0x65,0x65,0x69,0xfc,0x11,0x7a,0x73,0x0e,0x53,0x45,0xe8,0xc9,0xc6,0x35,0x50,0xfe,0xd4,0xa2,0xe7,0x3a,0xe3,0x0b,0xd3,0x6d,0x2e,0xb6,0xc7,0xb9,0x01,0x29,0x9d,0xc8,0x5a,0xe5,0x55,0x0b,0x88,0x63,0xa7,0xa0,0x45,0x1f,0x24,0x83,0x14,0x1f,0x6c,0xe7,0xc2,0xdf,0xef,0x36,0x3d,0xe8,0xad,0x4b,0x4e,0x78,0x5b,0xaf,0x08},
+ {0x33,0x25,0x1f,0x88,0xdc,0x99,0x34,0x28,0xb6,0x23,0x93,0x77,0xda,0x25,0x05,0x9d,0xf4,0x41,0x34,0x67,0xfb,0xdd,0x7a,0x89,0x8d,0x16,0x3a,0x16,0x71,0x9d,0xb7,0x32,0x4b,0x2c,0xcc,0x89,0xd2,0x14,0x73,0xe2,0x8d,0x17,0x87,0xa2,0x11,0xbd,0xe4,0x4b,0xce,0x64,0x33,0xfa,0xd6,0x28,0xd5,0x18,0x6e,0x82,0xd9,0xaf,0xd5,0xc1,0x23,0x64,0x6a,0xb3,0xfc,0xed,0xd9,0xf8,0x85,0xcc,0xf9,0xe5,0x46,0x37,0x8f,0xc2,0xbc,0x22,0xcd,0xd3,0xe5,0xf9,0x38,0xe3,0x9d,0xe4,0xcc,0x2d,0x3e,0xc1,0xfb,0x5e,0x0a,0x48},
+ {0x71,0x20,0x62,0x01,0x0b,0xe7,0x51,0x0b,0xc5,0xaf,0x1d,0x8b,0xcf,0x05,0xb5,0x06,0xcd,0xab,0x5a,0xef,0x61,0xb0,0x6b,0x2c,0x31,0xbf,0xb7,0x0c,0x60,0x27,0xaa,0x47,0x1f,0x22,0xce,0x42,0xe4,0x4c,0x61,0xb6,0x28,0x39,0x05,0x4c,0xcc,0x9d,0x19,0x6e,0x03,0xbe,0x1c,0xdc,0xa4,0xb4,0x3f,0x66,0x06,0x8e,0x1c,0x69,0x47,0x1d,0xb3,0x24,0xc3,0xf8,0x15,0xc0,0xed,0x1e,0x54,0x2a,0x7c,0x3f,0x69,0x7c,0x7e,0xfe,0xa4,0x11,0xd6,0x78,0xa2,0x4e,0x13,0x66,0xaf,0xf0,0x94,0xa0,0xdd,0x14,0x5d,0x58,0x5b,0x54},
+ {0x0f,0x3a,0xd4,0xa0,0x5e,0x27,0xbf,0x67,0xbe,0xee,0x9b,0x08,0x34,0x8e,0xe6,0xad,0x2e,0xe7,0x79,0xd4,0x4c,0x13,0x89,0x42,0x54,0x54,0xba,0x32,0xc3,0xf9,0x62,0x0f,0xe1,0x21,0xb3,0xe3,0xd0,0xe4,0x04,0x62,0x95,0x1e,0xff,0x28,0x7a,0x63,0xaa,0x3b,0x9e,0xbd,0x99,0x5b,0xfd,0xcf,0x0c,0x0b,0x71,0xd0,0xc8,0x64,0x3e,0xdc,0x22,0x4d,0x39,0x5f,0x3b,0xd6,0x89,0x65,0xb4,0xfc,0x61,0xcf,0xcb,0x57,0x3f,0x6a,0xae,0x5c,0x05,0xfa,0x3a,0x95,0xd2,0xc2,0xba,0xfe,0x36,0x14,0x37,0x36,0x1a,0xa0,0x0f,0x1c},
+ {0xff,0x3d,0x94,0x22,0xb6,0x04,0xc6,0xd2,0xa0,0xb3,0xcf,0x44,0xce,0xbe,0x8c,0xbc,0x78,0x86,0x80,0x97,0xf3,0x4f,0x25,0x5d,0xbf,0xa6,0x1c,0x3b,0x4f,0x61,0xa3,0x0f,0x50,0x6a,0x93,0x8c,0x0e,0x2b,0x08,0x69,0xb6,0xc5,0xda,0xc1,0x35,0xa0,0xc9,0xf9,0x34,0xb6,0xdf,0xc4,0x54,0x3e,0xb7,0x6f,0x40,0xc1,0x2b,0x1d,0x9b,0x41,0x05,0x40,0xf0,0x82,0xbe,0xb9,0xbd,0xfe,0x03,0xa0,0x90,0xac,0x44,0x3a,0xaf,0xc1,0x89,0x20,0x8e,0xfa,0x54,0x19,0x91,0x9f,0x49,0xf8,0x42,0xab,0x40,0xef,0x8a,0x21,0xba,0x1f},
+ {0x3e,0xf5,0xc8,0xfa,0x48,0x94,0x54,0xab,0x41,0x37,0xa6,0x7b,0x9a,0xe8,0xf6,0x81,0x01,0x5e,0x2b,0x6c,0x7d,0x6c,0xfd,0x74,0x42,0x6e,0xc8,0xa8,0xca,0x3a,0x2e,0x39,0x94,0x01,0x7b,0x3e,0x04,0x57,0x3e,0x4f,0x7f,0xaf,0xda,0x08,0xee,0x3e,0x1d,0xa8,0xf1,0xde,0xdc,0x99,0xab,0xc6,0x39,0xc8,0xd5,0x61,0x77,0xff,0x13,0x5d,0x53,0x6c,0xaf,0x35,0x8a,0x3e,0xe9,0x34,0xbd,0x4c,0x16,0xe8,0x87,0x58,0x44,0x81,0x07,0x2e,0xab,0xb0,0x9a,0xf2,0x76,0x9c,0x31,0x19,0x3b,0xc1,0x0a,0xd5,0xe4,0x7f,0xe1,0x25},
+ {0x76,0xf6,0x04,0x1e,0xd7,0x9b,0x28,0x0a,0x95,0x0f,0x42,0xd6,0x52,0x1c,0x8e,0x20,0xab,0x1f,0x69,0x34,0xb0,0xd8,0x86,0x51,0x51,0xb3,0x9f,0x2a,0x44,0x51,0x57,0x25,0xa7,0x21,0xf1,0x76,0xf5,0x7f,0x5f,0x91,0xe3,0x87,0xcd,0x2f,0x27,0x32,0x4a,0xc3,0x26,0xe5,0x1b,0x4d,0xde,0x2f,0xba,0xcc,0x9b,0x89,0x69,0x89,0x8f,0x82,0xba,0x6b,0x01,0x39,0xfe,0x90,0x66,0xbc,0xd1,0xe2,0xd5,0x7a,0x99,0xa0,0x18,0x4a,0xb5,0x4c,0xd4,0x60,0x84,0xaf,0x14,0x69,0x1d,0x97,0xe4,0x7b,0x6b,0x7f,0x4f,0x50,0x9d,0x55},
+ {0xd5,0x54,0xeb,0xb3,0x78,0x83,0x73,0xa7,0x7c,0x3c,0x55,0xa5,0x66,0xd3,0x69,0x1d,0xba,0x00,0x28,0xf9,0x62,0xcf,0x26,0x0a,0x17,0x32,0x7e,0x80,0xd5,0x12,0xab,0x01,0xfd,0x66,0xd2,0xf6,0xe7,0x91,0x48,0x9c,0x1b,0x78,0x07,0x03,0x9b,0xa1,0x44,0x07,0x3b,0xe2,0x61,0x60,0x1d,0x8f,0x38,0x88,0x0e,0xd5,0x4b,0x35,0xa3,0xa6,0x3e,0x12,0x96,0x2d,0xe3,0x41,0x90,0x18,0x8d,0x11,0x48,0x58,0x31,0xd8,0xc2,0xe3,0xed,0xb9,0xd9,0x45,0x32,0xd8,0x71,0x42,0xab,0x1e,0x54,0xa1,0x18,0xc9,0xe2,0x61,0x39,0x4a},
+ {0xa0,0xbb,0xe6,0xf8,0xe0,0x3b,0xdc,0x71,0x0a,0xe3,0xff,0x7e,0x34,0xf8,0xce,0xd6,0x6a,0x47,0x3a,0xe1,0x5f,0x42,0x92,0xa9,0x63,0xb7,0x1d,0xfb,0xe3,0xbc,0xd6,0x2c,0x1e,0x3f,0x23,0xf3,0x44,0xd6,0x27,0x03,0x16,0xf0,0xfc,0x34,0x0e,0x26,0x9a,0x49,0x79,0xb9,0xda,0xf2,0x16,0xa7,0xb5,0x83,0x1f,0x11,0xd4,0x9b,0xad,0xee,0xac,0x68,0x10,0xc2,0xd7,0xf3,0x0e,0xc9,0xb4,0x38,0x0c,0x04,0xad,0xb7,0x24,0x6e,0x8e,0x30,0x23,0x3e,0xe7,0xb7,0xf1,0xd9,0x60,0x38,0x97,0xf5,0x08,0xb5,0xd5,0x60,0x57,0x59},
+ {0x97,0x63,0xaa,0x04,0xe1,0xbf,0x29,0x61,0xcb,0xfc,0xa7,0xa4,0x08,0x00,0x96,0x8f,0x58,0x94,0x90,0x7d,0x89,0xc0,0x8b,0x3f,0xa9,0x91,0xb2,0xdc,0x3e,0xa4,0x9f,0x70,0x90,0x27,0x02,0xfd,0xeb,0xcb,0x2a,0x88,0x60,0x57,0x11,0xc4,0x05,0x33,0xaf,0x89,0xf4,0x73,0x34,0x7d,0xe3,0x92,0xf4,0x65,0x2b,0x5a,0x51,0x54,0xdf,0xc5,0xb2,0x2c,0xca,0x2a,0xfd,0x63,0x8c,0x5d,0x0a,0xeb,0xff,0x4e,0x69,0x2e,0x66,0xc1,0x2b,0xd2,0x3a,0xb0,0xcb,0xf8,0x6e,0xf3,0x23,0x27,0x1f,0x13,0xc8,0xf0,0xec,0x29,0xf0,0x70},
+ {0x33,0x3e,0xed,0x2e,0xb3,0x07,0x13,0x46,0xe7,0x81,0x55,0xa4,0x33,0x2f,0x04,0xae,0x66,0x03,0x5f,0x19,0xd3,0x49,0x44,0xc9,0x58,0x48,0x31,0x6c,0x8a,0x5d,0x7d,0x0b,0xb9,0xb0,0x10,0x5e,0xaa,0xaf,0x6a,0x2a,0xa9,0x1a,0x04,0xef,0x70,0xa3,0xf0,0x78,0x1f,0xd6,0x3a,0xaa,0x77,0xfb,0x3e,0x77,0xe1,0xd9,0x4b,0xa7,0xa2,0xa5,0xec,0x44,0x43,0xd5,0x95,0x7b,0x32,0x48,0xd4,0x25,0x1d,0x0f,0x34,0xa3,0x00,0x83,0xd3,0x70,0x2b,0xc5,0xe1,0x60,0x1c,0x53,0x1c,0xde,0xe4,0xe9,0x7d,0x2c,0x51,0x24,0x22,0x27},
+ {0x2e,0x34,0xc5,0x49,0xaf,0x92,0xbc,0x1a,0xd0,0xfa,0xe6,0xb2,0x11,0xd8,0xee,0xff,0x29,0x4e,0xc8,0xfc,0x8d,0x8c,0xa2,0xef,0x43,0xc5,0x4c,0xa4,0x18,0xdf,0xb5,0x11,0xfc,0x75,0xa9,0x42,0x8a,0xbb,0x7b,0xbf,0x58,0xa3,0xad,0x96,0x77,0x39,0x5c,0x8c,0x48,0xaa,0xed,0xcd,0x6f,0xc7,0x7f,0xe2,0xa6,0x20,0xbc,0xf6,0xd7,0x5f,0x73,0x19,0x66,0x42,0xc8,0x42,0xd0,0x90,0xab,0xe3,0x7e,0x54,0x19,0x7f,0x0f,0x8e,0x84,0xeb,0xb9,0x97,0xa4,0x65,0xd0,0xa1,0x03,0x25,0x5f,0x89,0xdf,0x91,0x11,0x91,0xef,0x0f}
+};
diff --git a/src/ext/ed25519/donna/ed25519-donna-batchverify.h b/src/ext/ed25519/donna/ed25519-donna-batchverify.h
new file mode 100644
index 0000000000..43c4923b3e
--- /dev/null
+++ b/src/ext/ed25519/donna/ed25519-donna-batchverify.h
@@ -0,0 +1,275 @@
+/*
+ Ed25519 batch verification
+*/
+
+#define max_batch_size 64
+#define heap_batch_size ((max_batch_size * 2) + 1)
+
+/* which limb is the 128th bit in? */
+static const size_t limb128bits = (128 + bignum256modm_bits_per_limb - 1) / bignum256modm_bits_per_limb;
+
+typedef size_t heap_index_t;
+
+typedef struct batch_heap_t {
+ unsigned char r[heap_batch_size][16]; /* 128 bit random values */
+ ge25519 points[heap_batch_size];
+ bignum256modm scalars[heap_batch_size];
+ heap_index_t heap[heap_batch_size];
+ size_t size;
+} batch_heap;
+
+/* swap two values in the heap */
+static void
+heap_swap(heap_index_t *heap, size_t a, size_t b) {
+ heap_index_t temp;
+ temp = heap[a];
+ heap[a] = heap[b];
+ heap[b] = temp;
+}
+
+/* add the scalar at the end of the list to the heap */
+static void
+heap_insert_next(batch_heap *heap) {
+ size_t node = heap->size, parent;
+ heap_index_t *pheap = heap->heap;
+ bignum256modm *scalars = heap->scalars;
+
+ /* insert at the bottom */
+ pheap[node] = (heap_index_t)node;
+
+ /* sift node up to its sorted spot */
+ parent = (node - 1) / 2;
+ while (node && lt256_modm_batch(scalars[pheap[parent]], scalars[pheap[node]], bignum256modm_limb_size - 1)) {
+ heap_swap(pheap, parent, node);
+ node = parent;
+ parent = (node - 1) / 2;
+ }
+ heap->size++;
+}
+
+/* update the heap when the root element is updated */
+static void
+heap_updated_root(batch_heap *heap, size_t limbsize) {
+ size_t node, parent, childr, childl;
+ heap_index_t *pheap = heap->heap;
+ bignum256modm *scalars = heap->scalars;
+
+ /* sift root to the bottom */
+ parent = 0;
+ node = 1;
+ childl = 1;
+ childr = 2;
+ while ((childr < heap->size)) {
+ node = lt256_modm_batch(scalars[pheap[childl]], scalars[pheap[childr]], limbsize) ? childr : childl;
+ heap_swap(pheap, parent, node);
+ parent = node;
+ childl = (parent * 2) + 1;
+ childr = childl + 1;
+ }
+
+ /* sift root back up to its sorted spot */
+ parent = (node - 1) / 2;
+ while (node && lte256_modm_batch(scalars[pheap[parent]], scalars[pheap[node]], limbsize)) {
+ heap_swap(pheap, parent, node);
+ node = parent;
+ parent = (node - 1) / 2;
+ }
+}
+
+/* build the heap with count elements, count must be >= 3 */
+static void
+heap_build(batch_heap *heap, size_t count) {
+ heap->heap[0] = 0;
+ heap->size = 0;
+ while (heap->size < count)
+ heap_insert_next(heap);
+}
+
+/* extend the heap to contain new_count elements */
+static void
+heap_extend(batch_heap *heap, size_t new_count) {
+ while (heap->size < new_count)
+ heap_insert_next(heap);
+}
+
+/* get the top 2 elements of the heap */
+static void
+heap_get_top2(batch_heap *heap, heap_index_t *max1, heap_index_t *max2, size_t limbsize) {
+ heap_index_t h0 = heap->heap[0], h1 = heap->heap[1], h2 = heap->heap[2];
+ if (lt256_modm_batch(heap->scalars[h1], heap->scalars[h2], limbsize))
+ h1 = h2;
+ *max1 = h0;
+ *max2 = h1;
+}
+
+/* */
+static void
+ge25519_multi_scalarmult_vartime_final(ge25519 *r, ge25519 *point, bignum256modm scalar) {
+ const bignum256modm_element_t topbit = ((bignum256modm_element_t)1 << (bignum256modm_bits_per_limb - 1));
+ size_t limb = limb128bits;
+ bignum256modm_element_t flag;
+
+ if (isone256_modm_batch(scalar)) {
+ /* this will happen most of the time after bos-carter */
+ *r = *point;
+ return;
+ } else if (iszero256_modm_batch(scalar)) {
+ /* this will only happen if all scalars == 0 */
+ memset(r, 0, sizeof(*r));
+ r->y[0] = 1;
+ r->z[0] = 1;
+ return;
+ }
+
+ *r = *point;
+
+ /* find the limb where first bit is set */
+ while (!scalar[limb])
+ limb--;
+
+ /* find the first bit */
+ flag = topbit;
+ while ((scalar[limb] & flag) == 0)
+ flag >>= 1;
+
+ /* exponentiate */
+ for (;;) {
+ ge25519_double(r, r);
+ if (scalar[limb] & flag)
+ ge25519_add(r, r, point);
+
+ flag >>= 1;
+ if (!flag) {
+ if (!limb--)
+ break;
+ flag = topbit;
+ }
+ }
+}
+
+/* count must be >= 5 */
+static void
+ge25519_multi_scalarmult_vartime(ge25519 *r, batch_heap *heap, size_t count) {
+ heap_index_t max1, max2;
+
+ /* start with the full limb size */
+ size_t limbsize = bignum256modm_limb_size - 1;
+
+ /* whether the heap has been extended to include the 128 bit scalars */
+ int extended = 0;
+
+ /* grab an odd number of scalars to build the heap, unknown limb sizes */
+ heap_build(heap, ((count + 1) / 2) | 1);
+
+ for (;;) {
+ heap_get_top2(heap, &max1, &max2, limbsize);
+
+ /* only one scalar remaining, we're done */
+ if (iszero256_modm_batch(heap->scalars[max2]))
+ break;
+
+ /* exhausted another limb? */
+ if (!heap->scalars[max1][limbsize])
+ limbsize -= 1;
+
+ /* can we extend to the 128 bit scalars? */
+ if (!extended && isatmost128bits256_modm_batch(heap->scalars[max1])) {
+ heap_extend(heap, count);
+ heap_get_top2(heap, &max1, &max2, limbsize);
+ extended = 1;
+ }
+
+ sub256_modm_batch(heap->scalars[max1], heap->scalars[max1], heap->scalars[max2], limbsize);
+ ge25519_add(&heap->points[max2], &heap->points[max2], &heap->points[max1]);
+ heap_updated_root(heap, limbsize);
+ }
+
+ ge25519_multi_scalarmult_vartime_final(r, &heap->points[max1], heap->scalars[max1]);
+}
+
+/* not actually used for anything other than testing */
+unsigned char batch_point_buffer[3][32];
+
+static int
+ge25519_is_neutral_vartime(const ge25519 *p) {
+ static const unsigned char zero[32] = {0};
+ unsigned char point_buffer[3][32];
+ curve25519_contract(point_buffer[0], p->x);
+ curve25519_contract(point_buffer[1], p->y);
+ curve25519_contract(point_buffer[2], p->z);
+ memcpy(batch_point_buffer[1], point_buffer[1], 32);
+ return (memcmp(point_buffer[0], zero, 32) == 0) && (memcmp(point_buffer[1], point_buffer[2], 32) == 0);
+}
+
+int
+ED25519_FN(ed25519_sign_open_batch) (const unsigned char **m, size_t *mlen, const unsigned char **pk, const unsigned char **RS, size_t num, int *valid) {
+ batch_heap ALIGN(16) batch;
+ ge25519 ALIGN(16) p;
+ bignum256modm *r_scalars;
+ size_t i, batchsize;
+ unsigned char hram[64];
+ int ret = 0;
+
+ for (i = 0; i < num; i++)
+ valid[i] = 1;
+
+ while (num > 3) {
+ batchsize = (num > max_batch_size) ? max_batch_size : num;
+
+ /* generate r (scalars[batchsize+1]..scalars[2*batchsize]) */
+ ED25519_FN(ed25519_randombytes_unsafe) (batch.r, batchsize * 16);
+ r_scalars = &batch.scalars[batchsize + 1];
+ for (i = 0; i < batchsize; i++)
+ expand256_modm(r_scalars[i], batch.r[i], 16);
+
+ /* compute scalars[0] = ((r1s1 + r2s2 + ...)) */
+ for (i = 0; i < batchsize; i++) {
+ expand256_modm(batch.scalars[i], RS[i] + 32, 32);
+ mul256_modm(batch.scalars[i], batch.scalars[i], r_scalars[i]);
+ }
+ for (i = 1; i < batchsize; i++)
+ add256_modm(batch.scalars[0], batch.scalars[0], batch.scalars[i]);
+
+ /* compute scalars[1]..scalars[batchsize] as r[i]*H(R[i],A[i],m[i]) */
+ for (i = 0; i < batchsize; i++) {
+ ed25519_hram(hram, RS[i], pk[i], m[i], mlen[i]);
+ expand256_modm(batch.scalars[i+1], hram, 64);
+ mul256_modm(batch.scalars[i+1], batch.scalars[i+1], r_scalars[i]);
+ }
+
+ /* compute points */
+ batch.points[0] = ge25519_basepoint;
+ for (i = 0; i < batchsize; i++)
+ if (!ge25519_unpack_negative_vartime(&batch.points[i+1], pk[i]))
+ goto fallback;
+ for (i = 0; i < batchsize; i++)
+ if (!ge25519_unpack_negative_vartime(&batch.points[batchsize+i+1], RS[i]))
+ goto fallback;
+
+ ge25519_multi_scalarmult_vartime(&p, &batch, (batchsize * 2) + 1);
+ if (!ge25519_is_neutral_vartime(&p)) {
+ ret |= 2;
+
+ fallback:
+ for (i = 0; i < batchsize; i++) {
+ valid[i] = ED25519_FN(ed25519_sign_open) (m[i], mlen[i], pk[i], RS[i]) ? 0 : 1;
+ ret |= (valid[i] ^ 1);
+ }
+ }
+
+ m += batchsize;
+ mlen += batchsize;
+ pk += batchsize;
+ RS += batchsize;
+ num -= batchsize;
+ valid += batchsize;
+ }
+
+ for (i = 0; i < num; i++) {
+ valid[i] = ED25519_FN(ed25519_sign_open) (m[i], mlen[i], pk[i], RS[i]) ? 0 : 1;
+ ret |= (valid[i] ^ 1);
+ }
+
+ return ret;
+}
+
diff --git a/src/ext/ed25519/donna/ed25519-donna-impl-base.h b/src/ext/ed25519/donna/ed25519-donna-impl-base.h
new file mode 100644
index 0000000000..48913edcb4
--- /dev/null
+++ b/src/ext/ed25519/donna/ed25519-donna-impl-base.h
@@ -0,0 +1,364 @@
+/*
+ conversions
+*/
+
+DONNA_INLINE static void
+ge25519_p1p1_to_partial(ge25519 *r, const ge25519_p1p1 *p) {
+ curve25519_mul(r->x, p->x, p->t);
+ curve25519_mul(r->y, p->y, p->z);
+ curve25519_mul(r->z, p->z, p->t);
+}
+
+DONNA_INLINE static void
+ge25519_p1p1_to_full(ge25519 *r, const ge25519_p1p1 *p) {
+ curve25519_mul(r->x, p->x, p->t);
+ curve25519_mul(r->y, p->y, p->z);
+ curve25519_mul(r->z, p->z, p->t);
+ curve25519_mul(r->t, p->x, p->y);
+}
+
+static void
+ge25519_full_to_pniels(ge25519_pniels *p, const ge25519 *r) {
+ curve25519_sub(p->ysubx, r->y, r->x);
+ curve25519_add(p->xaddy, r->y, r->x);
+ curve25519_copy(p->z, r->z);
+ curve25519_mul(p->t2d, r->t, ge25519_ec2d);
+}
+
+/*
+ adding & doubling
+*/
+
+static void
+ge25519_add_p1p1(ge25519_p1p1 *r, const ge25519 *p, const ge25519 *q) {
+ bignum25519 a,b,c,d,t,u;
+
+ curve25519_sub(a, p->y, p->x);
+ curve25519_add(b, p->y, p->x);
+ curve25519_sub(t, q->y, q->x);
+ curve25519_add(u, q->y, q->x);
+ curve25519_mul(a, a, t);
+ curve25519_mul(b, b, u);
+ curve25519_mul(c, p->t, q->t);
+ curve25519_mul(c, c, ge25519_ec2d);
+ curve25519_mul(d, p->z, q->z);
+ curve25519_add(d, d, d);
+ curve25519_sub(r->x, b, a);
+ curve25519_add(r->y, b, a);
+ curve25519_add_after_basic(r->z, d, c);
+ curve25519_sub_after_basic(r->t, d, c);
+}
+
+
+static void
+ge25519_double_p1p1(ge25519_p1p1 *r, const ge25519 *p) {
+ bignum25519 a,b,c;
+
+ curve25519_square(a, p->x);
+ curve25519_square(b, p->y);
+ curve25519_square(c, p->z);
+ curve25519_add_reduce(c, c, c);
+ curve25519_add(r->x, p->x, p->y);
+ curve25519_square(r->x, r->x);
+ curve25519_add(r->y, b, a);
+ curve25519_sub(r->z, b, a);
+ curve25519_sub_after_basic(r->x, r->x, r->y);
+ curve25519_sub_after_basic(r->t, c, r->z);
+}
+
+static void
+ge25519_nielsadd2_p1p1(ge25519_p1p1 *r, const ge25519 *p, const ge25519_niels *q, unsigned char signbit) {
+ const bignum25519 *qb = (const bignum25519 *)q;
+ bignum25519 *rb = (bignum25519 *)r;
+ bignum25519 a,b,c;
+
+ curve25519_sub(a, p->y, p->x);
+ curve25519_add(b, p->y, p->x);
+ curve25519_mul(a, a, qb[signbit]); /* x for +, y for - */
+ curve25519_mul(r->x, b, qb[signbit^1]); /* y for +, x for - */
+ curve25519_add(r->y, r->x, a);
+ curve25519_sub(r->x, r->x, a);
+ curve25519_mul(c, p->t, q->t2d);
+ curve25519_add_reduce(r->t, p->z, p->z);
+ curve25519_copy(r->z, r->t);
+ curve25519_add(rb[2+signbit], rb[2+signbit], c); /* z for +, t for - */
+ curve25519_sub(rb[2+(signbit^1)], rb[2+(signbit^1)], c); /* t for +, z for - */
+}
+
+static void
+ge25519_pnielsadd_p1p1(ge25519_p1p1 *r, const ge25519 *p, const ge25519_pniels *q, unsigned char signbit) {
+ const bignum25519 *qb = (const bignum25519 *)q;
+ bignum25519 *rb = (bignum25519 *)r;
+ bignum25519 a,b,c;
+
+ curve25519_sub(a, p->y, p->x);
+ curve25519_add(b, p->y, p->x);
+ curve25519_mul(a, a, qb[signbit]); /* ysubx for +, xaddy for - */
+ curve25519_mul(r->x, b, qb[signbit^1]); /* xaddy for +, ysubx for - */
+ curve25519_add(r->y, r->x, a);
+ curve25519_sub(r->x, r->x, a);
+ curve25519_mul(c, p->t, q->t2d);
+ curve25519_mul(r->t, p->z, q->z);
+ curve25519_add_reduce(r->t, r->t, r->t);
+ curve25519_copy(r->z, r->t);
+ curve25519_add(rb[2+signbit], rb[2+signbit], c); /* z for +, t for - */
+ curve25519_sub(rb[2+(signbit^1)], rb[2+(signbit^1)], c); /* t for +, z for - */
+}
+
+static void
+ge25519_double_partial(ge25519 *r, const ge25519 *p) {
+ ge25519_p1p1 t;
+ ge25519_double_p1p1(&t, p);
+ ge25519_p1p1_to_partial(r, &t);
+}
+
+static void
+ge25519_double(ge25519 *r, const ge25519 *p) {
+ ge25519_p1p1 t;
+ ge25519_double_p1p1(&t, p);
+ ge25519_p1p1_to_full(r, &t);
+}
+
+static void
+ge25519_add(ge25519 *r, const ge25519 *p, const ge25519 *q) {
+ ge25519_p1p1 t;
+ ge25519_add_p1p1(&t, p, q);
+ ge25519_p1p1_to_full(r, &t);
+}
+
+static void
+ge25519_nielsadd2(ge25519 *r, const ge25519_niels *q) {
+ bignum25519 a,b,c,e,f,g,h;
+
+ curve25519_sub(a, r->y, r->x);
+ curve25519_add(b, r->y, r->x);
+ curve25519_mul(a, a, q->ysubx);
+ curve25519_mul(e, b, q->xaddy);
+ curve25519_add(h, e, a);
+ curve25519_sub(e, e, a);
+ curve25519_mul(c, r->t, q->t2d);
+ curve25519_add(f, r->z, r->z);
+ curve25519_add_after_basic(g, f, c);
+ curve25519_sub_after_basic(f, f, c);
+ curve25519_mul(r->x, e, f);
+ curve25519_mul(r->y, h, g);
+ curve25519_mul(r->z, g, f);
+ curve25519_mul(r->t, e, h);
+}
+
+static void
+ge25519_pnielsadd(ge25519_pniels *r, const ge25519 *p, const ge25519_pniels *q) {
+ bignum25519 a,b,c,x,y,z,t;
+
+ curve25519_sub(a, p->y, p->x);
+ curve25519_add(b, p->y, p->x);
+ curve25519_mul(a, a, q->ysubx);
+ curve25519_mul(x, b, q->xaddy);
+ curve25519_add(y, x, a);
+ curve25519_sub(x, x, a);
+ curve25519_mul(c, p->t, q->t2d);
+ curve25519_mul(t, p->z, q->z);
+ curve25519_add(t, t, t);
+ curve25519_add_after_basic(z, t, c);
+ curve25519_sub_after_basic(t, t, c);
+ curve25519_mul(r->xaddy, x, t);
+ curve25519_mul(r->ysubx, y, z);
+ curve25519_mul(r->z, z, t);
+ curve25519_mul(r->t2d, x, y);
+ curve25519_copy(y, r->ysubx);
+ curve25519_sub(r->ysubx, r->ysubx, r->xaddy);
+ curve25519_add(r->xaddy, r->xaddy, y);
+ curve25519_mul(r->t2d, r->t2d, ge25519_ec2d);
+}
+
+
+/*
+ pack & unpack
+*/
+
+static void
+ge25519_pack(unsigned char r[32], const ge25519 *p) {
+ bignum25519 tx, ty, zi;
+ unsigned char parity[32];
+ curve25519_recip(zi, p->z);
+ curve25519_mul(tx, p->x, zi);
+ curve25519_mul(ty, p->y, zi);
+ curve25519_contract(r, ty);
+ curve25519_contract(parity, tx);
+ r[31] ^= ((parity[0] & 1) << 7);
+}
+
+static int
+ge25519_unpack_negative_vartime(ge25519 *r, const unsigned char p[32]) {
+ static const unsigned char zero[32] = {0};
+ static const bignum25519 one = {1};
+ unsigned char parity = p[31] >> 7;
+ unsigned char check[32];
+ bignum25519 t, root, num, den, d3;
+
+ curve25519_expand(r->y, p);
+ curve25519_copy(r->z, one);
+ curve25519_square(num, r->y); /* x = y^2 */
+ curve25519_mul(den, num, ge25519_ecd); /* den = dy^2 */
+ curve25519_sub_reduce(num, num, r->z); /* x = y^2 - 1 */
+ curve25519_add(den, den, r->z); /* den = dy^2 + 1 */
+
+ /* Computation of sqrt(num/den) */
+ /* 1.: computation of num^((p-5)/8)*den^((7p-35)/8) = (num*den^7)^((p-5)/8) */
+ curve25519_square(t, den);
+ curve25519_mul(d3, t, den);
+ curve25519_square(r->x, d3);
+ curve25519_mul(r->x, r->x, den);
+ curve25519_mul(r->x, r->x, num);
+ curve25519_pow_two252m3(r->x, r->x);
+
+ /* 2. computation of r->x = num * den^3 * (num*den^7)^((p-5)/8) */
+ curve25519_mul(r->x, r->x, d3);
+ curve25519_mul(r->x, r->x, num);
+
+ /* 3. Check if either of the roots works: */
+ curve25519_square(t, r->x);
+ curve25519_mul(t, t, den);
+ curve25519_sub_reduce(root, t, num);
+ curve25519_contract(check, root);
+ if (!ed25519_verify(check, zero, 32)) {
+ curve25519_add_reduce(t, t, num);
+ curve25519_contract(check, t);
+ if (!ed25519_verify(check, zero, 32))
+ return 0;
+ curve25519_mul(r->x, r->x, ge25519_sqrtneg1);
+ }
+
+ curve25519_contract(check, r->x);
+ if ((check[0] & 1) == parity) {
+ curve25519_copy(t, r->x);
+ curve25519_neg(r->x, t);
+ }
+ curve25519_mul(r->t, r->x, r->y);
+ return 1;
+}
+
+
+/*
+ scalarmults
+*/
+
+#define S1_SWINDOWSIZE 5
+#define S1_TABLE_SIZE (1<<(S1_SWINDOWSIZE-2))
+#define S2_SWINDOWSIZE 7
+#define S2_TABLE_SIZE (1<<(S2_SWINDOWSIZE-2))
+
+/* computes [s1]p1 + [s2]basepoint */
+static void
+ge25519_double_scalarmult_vartime(ge25519 *r, const ge25519 *p1, const bignum256modm s1, const bignum256modm s2) {
+ signed char slide1[256], slide2[256];
+ ge25519_pniels pre1[S1_TABLE_SIZE];
+ ge25519 d1;
+ ge25519_p1p1 t;
+ int32_t i;
+
+ contract256_slidingwindow_modm(slide1, s1, S1_SWINDOWSIZE);
+ contract256_slidingwindow_modm(slide2, s2, S2_SWINDOWSIZE);
+
+ ge25519_double(&d1, p1);
+ ge25519_full_to_pniels(pre1, p1);
+ for (i = 0; i < S1_TABLE_SIZE - 1; i++)
+ ge25519_pnielsadd(&pre1[i+1], &d1, &pre1[i]);
+
+ /* set neutral */
+ memset(r, 0, sizeof(ge25519));
+ r->y[0] = 1;
+ r->z[0] = 1;
+
+ i = 255;
+ while ((i >= 0) && !(slide1[i] | slide2[i]))
+ i--;
+
+ for (; i >= 0; i--) {
+ ge25519_double_p1p1(&t, r);
+
+ if (slide1[i]) {
+ ge25519_p1p1_to_full(r, &t);
+ ge25519_pnielsadd_p1p1(&t, r, &pre1[abs(slide1[i]) / 2], (unsigned char)slide1[i] >> 7);
+ }
+
+ if (slide2[i]) {
+ ge25519_p1p1_to_full(r, &t);
+ ge25519_nielsadd2_p1p1(&t, r, &ge25519_niels_sliding_multiples[abs(slide2[i]) / 2], (unsigned char)slide2[i] >> 7);
+ }
+
+ ge25519_p1p1_to_partial(r, &t);
+ }
+}
+
+
+
+#if !defined(HAVE_GE25519_SCALARMULT_BASE_CHOOSE_NIELS)
+
+static uint32_t
+ge25519_windowb_equal(uint32_t b, uint32_t c) {
+ return ((b ^ c) - 1) >> 31;
+}
+
+static void
+ge25519_scalarmult_base_choose_niels(ge25519_niels *t, const uint8_t table[256][96], uint32_t pos, signed char b) {
+ bignum25519 neg;
+ uint32_t sign = (uint32_t)((unsigned char)b >> 7);
+ uint32_t mask = ~(sign - 1);
+ uint32_t u = (b + mask) ^ mask;
+ uint32_t i;
+
+ /* ysubx, xaddy, t2d in packed form. initialize to ysubx = 1, xaddy = 1, t2d = 0 */
+ uint8_t packed[96] = {0};
+ packed[0] = 1;
+ packed[32] = 1;
+
+ for (i = 0; i < 8; i++)
+ curve25519_move_conditional_bytes(packed, table[(pos * 8) + i], ge25519_windowb_equal(u, i + 1));
+
+ /* expand in to t */
+ curve25519_expand(t->ysubx, packed + 0);
+ curve25519_expand(t->xaddy, packed + 32);
+ curve25519_expand(t->t2d , packed + 64);
+
+ /* adjust for sign */
+ curve25519_swap_conditional(t->ysubx, t->xaddy, sign);
+ curve25519_neg(neg, t->t2d);
+ curve25519_swap_conditional(t->t2d, neg, sign);
+}
+
+#endif /* HAVE_GE25519_SCALARMULT_BASE_CHOOSE_NIELS */
+
+
+/* computes [s]basepoint */
+static void
+ge25519_scalarmult_base_niels(ge25519 *r, const uint8_t basepoint_table[256][96], const bignum256modm s) {
+ signed char b[64];
+ uint32_t i;
+ ge25519_niels t;
+
+ contract256_window4_modm(b, s);
+
+ ge25519_scalarmult_base_choose_niels(&t, basepoint_table, 0, b[1]);
+ curve25519_sub_reduce(r->x, t.xaddy, t.ysubx);
+ curve25519_add_reduce(r->y, t.xaddy, t.ysubx);
+ memset(r->z, 0, sizeof(bignum25519));
+ curve25519_copy(r->t, t.t2d);
+ r->z[0] = 2;
+ for (i = 3; i < 64; i += 2) {
+ ge25519_scalarmult_base_choose_niels(&t, basepoint_table, i / 2, b[i]);
+ ge25519_nielsadd2(r, &t);
+ }
+ ge25519_double_partial(r, r);
+ ge25519_double_partial(r, r);
+ ge25519_double_partial(r, r);
+ ge25519_double(r, r);
+ ge25519_scalarmult_base_choose_niels(&t, basepoint_table, 0, b[0]);
+ curve25519_mul(t.t2d, t.t2d, ge25519_ecd);
+ ge25519_nielsadd2(r, &t);
+ for(i = 2; i < 64; i += 2) {
+ ge25519_scalarmult_base_choose_niels(&t, basepoint_table, i / 2, b[i]);
+ ge25519_nielsadd2(r, &t);
+ }
+}
+
diff --git a/src/ext/ed25519/donna/ed25519-donna-impl-sse2.h b/src/ext/ed25519/donna/ed25519-donna-impl-sse2.h
new file mode 100644
index 0000000000..5fe3416381
--- /dev/null
+++ b/src/ext/ed25519/donna/ed25519-donna-impl-sse2.h
@@ -0,0 +1,390 @@
+/*
+ conversions
+*/
+
+static void
+ge25519_p1p1_to_partial(ge25519 *r, const ge25519_p1p1 *p) {
+ packed64bignum25519 ALIGN(16) xz, tt, xzout;
+ curve25519_mul(r->y, p->y, p->z);
+ curve25519_tangle64(xz, p->x, p->z);
+ curve25519_tangleone64(tt, p->t);
+ curve25519_mul_packed64(xzout, xz, tt);
+ curve25519_untangle64(r->x, r->z, xzout);
+}
+
+static void
+ge25519_p1p1_to_full(ge25519 *r, const ge25519_p1p1 *p) {
+ packed64bignum25519 ALIGN(16) zy, xt, xx, zz, ty;
+ curve25519_tangle64(ty, p->t, p->y);
+ curve25519_tangleone64(xx, p->x);
+ curve25519_mul_packed64(xt, xx, ty);
+ curve25519_untangle64(r->x, r->t, xt);
+ curve25519_tangleone64(zz, p->z);
+ curve25519_mul_packed64(zy, zz, ty);
+ curve25519_untangle64(r->z, r->y, zy);
+}
+
+static void
+ge25519_full_to_pniels(ge25519_pniels *p, const ge25519 *r) {
+ curve25519_sub(p->ysubx, r->y, r->x);
+ curve25519_add(p->xaddy, r->x, r->y);
+ curve25519_copy(p->z, r->z);
+ curve25519_mul(p->t2d, r->t, ge25519_ec2d);
+}
+
+/*
+ adding & doubling
+*/
+
+static void
+ge25519_add_p1p1(ge25519_p1p1 *r, const ge25519 *p, const ge25519 *q) {
+ bignum25519 ALIGN(16) a,b,c,d;
+ packed32bignum25519 ALIGN(16) xx, yy, yypxx, yymxx, bd, ac, bdmac, bdpac;
+ packed64bignum25519 ALIGN(16) at, bu, atbu, ptz, qtz, cd;
+
+ curve25519_tangle32(yy, p->y, q->y);
+ curve25519_tangle32(xx, p->x, q->x);
+ curve25519_add_packed32(yypxx, yy, xx);
+ curve25519_sub_packed32(yymxx, yy, xx);
+ curve25519_tangle64_from32(at, bu, yymxx, yypxx);
+ curve25519_mul_packed64(atbu, at, bu);
+ curve25519_untangle64(a, b, atbu);
+ curve25519_tangle64(ptz, p->t, p->z);
+ curve25519_tangle64(qtz, q->t, q->z);
+ curve25519_mul_packed64(cd, ptz, qtz);
+ curve25519_untangle64(c, d, cd);
+ curve25519_mul(c, c, ge25519_ec2d);
+ curve25519_add_reduce(d, d, d);
+ /* reduce, so no after_basic is needed later */
+ curve25519_tangle32(bd, b, d);
+ curve25519_tangle32(ac, a, c);
+ curve25519_sub_packed32(bdmac, bd, ac);
+ curve25519_add_packed32(bdpac, bd, ac);
+ curve25519_untangle32(r->x, r->t, bdmac);
+ curve25519_untangle32(r->y, r->z, bdpac);
+}
+
+
+static void
+ge25519_double_p1p1(ge25519_p1p1 *r, const ge25519 *p) {
+ bignum25519 ALIGN(16) a,b,c,x;
+ packed64bignum25519 ALIGN(16) xy, zx, ab, cx;
+ packed32bignum25519 ALIGN(16) xc, yz, xt, yc, ac, bc;
+
+ curve25519_add(x, p->x, p->y);
+ curve25519_tangle64(xy, p->x, p->y);
+ curve25519_square_packed64(ab, xy);
+ curve25519_untangle64(a, b, ab);
+ curve25519_tangle64(zx, p->z, x);
+ curve25519_square_packed64(cx, zx);
+ curve25519_untangle64(c, x, cx);
+ curve25519_tangle32(bc, b, c);
+ curve25519_tangle32(ac, a, c);
+ curve25519_add_reduce_packed32(yc, bc, ac);
+ curve25519_untangle32(r->y, c, yc);
+ curve25519_sub(r->z, b, a);
+ curve25519_tangle32(yz, r->y, r->z);
+ curve25519_tangle32(xc, x, c);
+ curve25519_sub_after_basic_packed32(xt, xc, yz);
+ curve25519_untangle32(r->x, r->t, xt);
+}
+
+static void
+ge25519_nielsadd2_p1p1(ge25519_p1p1 *r, const ge25519 *p, const ge25519_niels *q, unsigned char signbit) {
+ const bignum25519 *qb = (const bignum25519 *)q;
+ bignum25519 *rb = (bignum25519 *)r;
+ bignum25519 ALIGN(16) a,b,c;
+ packed64bignum25519 ALIGN(16) ab, yx, aybx;
+ packed32bignum25519 ALIGN(16) bd, ac, bdac;
+
+ curve25519_sub(a, p->y, p->x);
+ curve25519_add(b, p->y, p->x);
+ curve25519_tangle64(ab, a, b);
+ curve25519_tangle64(yx, qb[signbit], qb[signbit^1]);
+ curve25519_mul_packed64(aybx, ab, yx);
+ curve25519_untangle64(a, b, aybx);
+ curve25519_add(r->y, b, a);
+ curve25519_add_reduce(r->t, p->z, p->z);
+ curve25519_mul(c, p->t, q->t2d);
+ curve25519_copy(r->z, r->t);
+ curve25519_add(rb[2+signbit], rb[2+signbit], c);
+ curve25519_tangle32(bd, b, rb[2+(signbit^1)]);
+ curve25519_tangle32(ac, a, c);
+ curve25519_sub_packed32(bdac, bd, ac);
+ curve25519_untangle32(r->x, rb[2+(signbit^1)], bdac);
+}
+
+static void
+ge25519_pnielsadd_p1p1(ge25519_p1p1 *r, const ge25519 *p, const ge25519_pniels *q, unsigned char signbit) {
+ const bignum25519 *qb = (const bignum25519 *)q;
+ bignum25519 *rb = (bignum25519 *)r;
+ bignum25519 ALIGN(16) a,b,c;
+ packed64bignum25519 ALIGN(16) ab, yx, aybx, zt, zt2d, tc;
+ packed32bignum25519 ALIGN(16) bd, ac, bdac;
+
+ curve25519_sub(a, p->y, p->x);
+ curve25519_add(b, p->y, p->x);
+ curve25519_tangle64(ab, a, b);
+ curve25519_tangle64(yx, qb[signbit], qb[signbit^1]);
+ curve25519_mul_packed64(aybx, ab, yx);
+ curve25519_untangle64(a, b, aybx);
+ curve25519_add(r->y, b, a);
+ curve25519_tangle64(zt, p->z, p->t);
+ curve25519_tangle64(zt2d, q->z, q->t2d);
+ curve25519_mul_packed64(tc, zt, zt2d);
+ curve25519_untangle64(r->t, c, tc);
+ curve25519_add_reduce(r->t, r->t, r->t);
+ curve25519_copy(r->z, r->t);
+ curve25519_add(rb[2+signbit], rb[2+signbit], c);
+ curve25519_tangle32(bd, b, rb[2+(signbit^1)]);
+ curve25519_tangle32(ac, a, c);
+ curve25519_sub_packed32(bdac, bd, ac);
+ curve25519_untangle32(r->x, rb[2+(signbit^1)], bdac);
+}
+
+static void
+ge25519_double(ge25519 *r, const ge25519 *p) {
+ ge25519_p1p1 ALIGN(16) t;
+ ge25519_double_p1p1(&t, p);
+ ge25519_p1p1_to_full(r, &t);
+}
+
+static void
+ge25519_add(ge25519 *r, const ge25519 *p, const ge25519 *q) {
+ ge25519_p1p1 ALIGN(16) t;
+ ge25519_add_p1p1(&t, p, q);
+ ge25519_p1p1_to_full(r, &t);
+}
+
+static void
+ge25519_double_partial(ge25519 *r, const ge25519 *p) {
+ ge25519_p1p1 ALIGN(16) t;
+ ge25519_double_p1p1(&t, p);
+ ge25519_p1p1_to_partial(r, &t);
+}
+
+static void
+ge25519_nielsadd2(ge25519 *r, const ge25519_niels *q) {
+ packed64bignum25519 ALIGN(16) ab, yx, aybx, eg, ff, hh, xz, ty;
+ packed32bignum25519 ALIGN(16) bd, ac, bdac;
+ bignum25519 ALIGN(16) a,b,c,d,e,f,g,h;
+
+ curve25519_sub(a, r->y, r->x);
+ curve25519_add(b, r->y, r->x);
+ curve25519_tangle64(ab, a, b);
+ curve25519_tangle64(yx, q->ysubx, q->xaddy);
+ curve25519_mul_packed64(aybx, ab, yx);
+ curve25519_untangle64(a, b, aybx);
+ curve25519_add(h, b, a);
+ curve25519_add_reduce(d, r->z, r->z);
+ curve25519_mul(c, r->t, q->t2d);
+ curve25519_add(g, d, c); /* d is reduced, so no need for after_basic */
+ curve25519_tangle32(bd, b, d);
+ curve25519_tangle32(ac, a, c);
+ curve25519_sub_packed32(bdac, bd, ac); /* d is reduced, so no need for after_basic */
+ curve25519_untangle32(e, f, bdac);
+ curve25519_tangle64(eg, e, g);
+ curve25519_tangleone64(ff, f);
+ curve25519_mul_packed64(xz, eg, ff);
+ curve25519_untangle64(r->x, r->z, xz);
+ curve25519_tangleone64(hh, h);
+ curve25519_mul_packed64(ty, eg, hh);
+ curve25519_untangle64(r->t, r->y, ty);
+}
+
+static void
+ge25519_pnielsadd(ge25519_pniels *r, const ge25519 *p, const ge25519_pniels *q) {
+ ge25519_p1p1 ALIGN(16) t;
+ ge25519 ALIGN(16) f;
+ ge25519_pnielsadd_p1p1(&t, p, q, 0);
+ ge25519_p1p1_to_full(&f, &t);
+ ge25519_full_to_pniels(r, &f);
+}
+
+/*
+ pack & unpack
+*/
+
+static void
+ge25519_pack(unsigned char r[32], const ge25519 *p) {
+ bignum25519 ALIGN(16) tx, ty, zi;
+ unsigned char parity[32];
+ curve25519_recip(zi, p->z);
+ curve25519_mul(tx, p->x, zi);
+ curve25519_mul(ty, p->y, zi);
+ curve25519_contract(r, ty);
+ curve25519_contract(parity, tx);
+ r[31] ^= ((parity[0] & 1) << 7);
+}
+
+
+static int
+ge25519_unpack_negative_vartime(ge25519 *r, const unsigned char p[32]) {
+ static const bignum25519 ALIGN(16) one = {1};
+ static const unsigned char zero[32] = {0};
+ unsigned char parity = p[31] >> 7;
+ unsigned char check[32];
+ bignum25519 ALIGN(16) t, root, num, den, d3;
+
+ curve25519_expand(r->y, p);
+ curve25519_copy(r->z, one);
+ curve25519_square_times(num, r->y, 1); /* x = y^2 */
+ curve25519_mul(den, num, ge25519_ecd); /* den = dy^2 */
+ curve25519_sub_reduce(num, num, r->z); /* x = y^2 - 1 */
+ curve25519_add(den, den, r->z); /* den = dy^2 + 1 */
+
+ /* Computation of sqrt(num/den) */
+ /* 1.: computation of num^((p-5)/8)*den^((7p-35)/8) = (num*den^7)^((p-5)/8) */
+ curve25519_square_times(t, den, 1);
+ curve25519_mul(d3, t, den);
+ curve25519_square_times(r->x, d3, 1);
+ curve25519_mul(r->x, r->x, den);
+ curve25519_mul(r->x, r->x, num);
+ curve25519_pow_two252m3(r->x, r->x);
+
+ /* 2. computation of r->x = t * num * den^3 */
+ curve25519_mul(r->x, r->x, d3);
+ curve25519_mul(r->x, r->x, num);
+
+ /* 3. Check if either of the roots works: */
+ curve25519_square_times(t, r->x, 1);
+ curve25519_mul(t, t, den);
+ curve25519_copy(root, t);
+ curve25519_sub_reduce(root, root, num);
+ curve25519_contract(check, root);
+ if (!ed25519_verify(check, zero, 32)) {
+ curve25519_add_reduce(t, t, num);
+ curve25519_contract(check, t);
+ if (!ed25519_verify(check, zero, 32))
+ return 0;
+ curve25519_mul(r->x, r->x, ge25519_sqrtneg1);
+ }
+
+ curve25519_contract(check, r->x);
+ if ((check[0] & 1) == parity) {
+ curve25519_copy(t, r->x);
+ curve25519_neg(r->x, t);
+ }
+ curve25519_mul(r->t, r->x, r->y);
+ return 1;
+}
+
+
+
+/*
+ scalarmults
+*/
+
+#define S1_SWINDOWSIZE 5
+#define S1_TABLE_SIZE (1<<(S1_SWINDOWSIZE-2))
+#define S2_SWINDOWSIZE 7
+#define S2_TABLE_SIZE (1<<(S2_SWINDOWSIZE-2))
+
+static void
+ge25519_double_scalarmult_vartime(ge25519 *r, const ge25519 *p1, const bignum256modm s1, const bignum256modm s2) {
+ signed char slide1[256], slide2[256];
+ ge25519_pniels ALIGN(16) pre1[S1_TABLE_SIZE];
+ ge25519 ALIGN(16) d1;
+ ge25519_p1p1 ALIGN(16) t;
+ int32_t i;
+
+ contract256_slidingwindow_modm(slide1, s1, S1_SWINDOWSIZE);
+ contract256_slidingwindow_modm(slide2, s2, S2_SWINDOWSIZE);
+
+ ge25519_double(&d1, p1);
+ ge25519_full_to_pniels(pre1, p1);
+ for (i = 0; i < S1_TABLE_SIZE - 1; i++)
+ ge25519_pnielsadd(&pre1[i+1], &d1, &pre1[i]);
+
+ /* set neutral */
+ memset(r, 0, sizeof(ge25519));
+ r->y[0] = 1;
+ r->z[0] = 1;
+
+ i = 255;
+ while ((i >= 0) && !(slide1[i] | slide2[i]))
+ i--;
+
+ for (; i >= 0; i--) {
+ ge25519_double_p1p1(&t, r);
+
+ if (slide1[i]) {
+ ge25519_p1p1_to_full(r, &t);
+ ge25519_pnielsadd_p1p1(&t, r, &pre1[abs(slide1[i]) / 2], (unsigned char)slide1[i] >> 7);
+ }
+
+ if (slide2[i]) {
+ ge25519_p1p1_to_full(r, &t);
+ ge25519_nielsadd2_p1p1(&t, r, &ge25519_niels_sliding_multiples[abs(slide2[i]) / 2], (unsigned char)slide2[i] >> 7);
+ }
+
+ ge25519_p1p1_to_partial(r, &t);
+ }
+}
+
+#if !defined(HAVE_GE25519_SCALARMULT_BASE_CHOOSE_NIELS)
+
+static uint32_t
+ge25519_windowb_equal(uint32_t b, uint32_t c) {
+ return ((b ^ c) - 1) >> 31;
+}
+
+static void
+ge25519_scalarmult_base_choose_niels(ge25519_niels *t, const uint8_t table[256][96], uint32_t pos, signed char b) {
+ bignum25519 ALIGN(16) neg;
+ uint32_t sign = (uint32_t)((unsigned char)b >> 7);
+ uint32_t mask = ~(sign - 1);
+ uint32_t u = (b + mask) ^ mask;
+ uint32_t i;
+
+ /* ysubx, xaddy, t2d in packed form. initialize to ysubx = 1, xaddy = 1, t2d = 0 */
+ uint8_t ALIGN(16) packed[96] = {0};
+ packed[0] = 1;
+ packed[32] = 1;
+
+ for (i = 0; i < 8; i++)
+ curve25519_move_conditional_bytes(packed, table[(pos * 8) + i], ge25519_windowb_equal(u, i + 1));
+
+ /* expand in to t */
+ curve25519_expand(t->ysubx, packed + 0);
+ curve25519_expand(t->xaddy, packed + 32);
+ curve25519_expand(t->t2d , packed + 64);
+
+ /* adjust for sign */
+ curve25519_swap_conditional(t->ysubx, t->xaddy, sign);
+ curve25519_neg(neg, t->t2d);
+ curve25519_swap_conditional(t->t2d, neg, sign);
+}
+
+#endif /* HAVE_GE25519_SCALARMULT_BASE_CHOOSE_NIELS */
+
+static void
+ge25519_scalarmult_base_niels(ge25519 *r, const uint8_t table[256][96], const bignum256modm s) {
+ signed char b[64];
+ uint32_t i;
+ ge25519_niels ALIGN(16) t;
+
+ contract256_window4_modm(b, s);
+
+ ge25519_scalarmult_base_choose_niels(&t, table, 0, b[1]);
+ curve25519_sub_reduce(r->x, t.xaddy, t.ysubx);
+ curve25519_add_reduce(r->y, t.xaddy, t.ysubx);
+ memset(r->z, 0, sizeof(bignum25519));
+ r->z[0] = 2;
+ curve25519_copy(r->t, t.t2d);
+ for (i = 3; i < 64; i += 2) {
+ ge25519_scalarmult_base_choose_niels(&t, table, i / 2, b[i]);
+ ge25519_nielsadd2(r, &t);
+ }
+ ge25519_double_partial(r, r);
+ ge25519_double_partial(r, r);
+ ge25519_double_partial(r, r);
+ ge25519_double(r, r);
+ ge25519_scalarmult_base_choose_niels(&t, table, 0, b[0]);
+ curve25519_mul(t.t2d, t.t2d, ge25519_ecd);
+ ge25519_nielsadd2(r, &t);
+ for(i = 2; i < 64; i += 2) {
+ ge25519_scalarmult_base_choose_niels(&t, table, i / 2, b[i]);
+ ge25519_nielsadd2(r, &t);
+ }
+}
diff --git a/src/ext/ed25519/donna/ed25519-donna-portable-identify.h b/src/ext/ed25519/donna/ed25519-donna-portable-identify.h
new file mode 100644
index 0000000000..26a264cf9e
--- /dev/null
+++ b/src/ext/ed25519/donna/ed25519-donna-portable-identify.h
@@ -0,0 +1,103 @@
+/* os */
+#if defined(_WIN32) || defined(_WIN64) || defined(__TOS_WIN__) || defined(__WINDOWS__)
+ #define OS_WINDOWS
+#elif defined(sun) || defined(__sun) || defined(__SVR4) || defined(__svr4__)
+ #define OS_SOLARIS
+#else
+  #include <sys/param.h> /* need this to define BSD */
+ #define OS_NIX
+ #if defined(__linux__)
+ #define OS_LINUX
+ #elif defined(BSD)
+ #define OS_BSD
+ #if defined(MACOS_X) || (defined(__APPLE__) & defined(__MACH__))
+ #define OS_OSX
+ #elif defined(macintosh) || defined(Macintosh)
+ #define OS_MAC
+ #elif defined(__OpenBSD__)
+ #define OS_OPENBSD
+ #endif
+ #endif
+#endif
+
+
+/* compiler */
+#if defined(_MSC_VER)
+ #define COMPILER_MSVC
+#endif
+#if defined(__ICC)
+ #define COMPILER_INTEL
+#endif
+#if defined(__GNUC__)
+ #if (__GNUC__ >= 3)
+ #define COMPILER_GCC ((__GNUC__ * 10000) + (__GNUC_MINOR__ * 100) + (__GNUC_PATCHLEVEL__))
+ #else
+ #define COMPILER_GCC ((__GNUC__ * 10000) + (__GNUC_MINOR__ * 100) )
+ #endif
+#endif
+#if defined(__PATHCC__)
+ #define COMPILER_PATHCC
+#endif
+#if defined(__clang__)
+ #define COMPILER_CLANG ((__clang_major__ * 10000) + (__clang_minor__ * 100) + (__clang_patchlevel__))
+#endif
+
+
+
+/* cpu */
+#if defined(__amd64__) || defined(__amd64) || defined(__x86_64__ ) || defined(_M_X64)
+ #define CPU_X86_64
+#elif defined(__i586__) || defined(__i686__) || (defined(_M_IX86) && (_M_IX86 >= 500))
+ #define CPU_X86 500
+#elif defined(__i486__) || (defined(_M_IX86) && (_M_IX86 >= 400))
+ #define CPU_X86 400
+#elif defined(__i386__) || (defined(_M_IX86) && (_M_IX86 >= 300)) || defined(__X86__) || defined(_X86_) || defined(__I86__)
+ #define CPU_X86 300
+#elif defined(__ia64__) || defined(_IA64) || defined(__IA64__) || defined(_M_IA64) || defined(__ia64)
+ #define CPU_IA64
+#endif
+
+#if defined(__sparc__) || defined(__sparc) || defined(__sparcv9)
+ #define CPU_SPARC
+ #if defined(__sparcv9)
+ #define CPU_SPARC64
+ #endif
+#endif
+
+#if defined(powerpc) || defined(__PPC__) || defined(__ppc__) || defined(_ARCH_PPC) || defined(__powerpc__) || defined(__powerpc) || defined(POWERPC) || defined(_M_PPC)
+ #define CPU_PPC
+ #if defined(_ARCH_PWR7)
+ #define CPU_POWER7
+ #elif defined(__64BIT__)
+ #define CPU_PPC64
+ #else
+ #define CPU_PPC32
+ #endif
+#endif
+
+#if defined(__hppa__) || defined(__hppa)
+ #define CPU_HPPA
+#endif
+
+#if defined(__alpha__) || defined(__alpha) || defined(_M_ALPHA)
+ #define CPU_ALPHA
+#endif
+
+/* 64 bit cpu */
+#if defined(CPU_X86_64) || defined(CPU_IA64) || defined(CPU_SPARC64) || defined(__64BIT__) || defined(__LP64__) || defined(_LP64) || (defined(_MIPS_SZLONG) && (_MIPS_SZLONG == 64))
+ #define CPU_64BITS
+#endif
+
+#if defined(COMPILER_MSVC)
+ typedef signed char int8_t;
+ typedef unsigned char uint8_t;
+ typedef signed short int16_t;
+ typedef unsigned short uint16_t;
+ typedef signed int int32_t;
+ typedef unsigned int uint32_t;
+ typedef signed __int64 int64_t;
+ typedef unsigned __int64 uint64_t;
+#else
+  #include <stdint.h>
+#endif
+
diff --git a/src/ext/ed25519/donna/ed25519-donna-portable.h b/src/ext/ed25519/donna/ed25519-donna-portable.h
new file mode 100644
index 0000000000..0a0f7fc3af
--- /dev/null
+++ b/src/ext/ed25519/donna/ed25519-donna-portable.h
@@ -0,0 +1,135 @@
+#include "ed25519-donna-portable-identify.h"
+
+#define mul32x32_64(a,b) (((uint64_t)(a))*(b))
+
+/* platform */
+#if defined(COMPILER_MSVC)
+  #include <intrin.h>
+ #if !defined(_DEBUG)
+ #undef mul32x32_64
+ #define mul32x32_64(a,b) __emulu(a,b)
+ #endif
+ #undef inline
+ #define inline __forceinline
+ #define DONNA_INLINE __forceinline
+ #define DONNA_NOINLINE __declspec(noinline)
+ #define ALIGN(x) __declspec(align(x))
+ #define ROTL32(a,b) _rotl(a,b)
+ #define ROTR32(a,b) _rotr(a,b)
+#else
+  #include <sys/param.h>
+ #define DONNA_INLINE inline __attribute__((always_inline))
+ #define DONNA_NOINLINE __attribute__((noinline))
+ #define ALIGN(x) __attribute__((aligned(x)))
+ #define ROTL32(a,b) (((a) << (b)) | ((a) >> (32 - b)))
+ #define ROTR32(a,b) (((a) >> (b)) | ((a) << (32 - b)))
+#endif
+
+/* uint128_t */
+#if defined(CPU_64BITS) && !defined(ED25519_FORCE_32BIT)
+ #if defined(COMPILER_CLANG) && (COMPILER_CLANG >= 30100)
+ #define HAVE_NATIVE_UINT128
+ typedef unsigned __int128 uint128_t;
+ #elif defined(COMPILER_MSVC)
+ #define HAVE_UINT128
+ typedef struct uint128_t {
+ uint64_t lo, hi;
+ } uint128_t;
+ #define mul64x64_128(out,a,b) out.lo = _umul128(a,b,&out.hi);
+ #define shr128_pair(out,hi,lo,shift) out = __shiftright128(lo, hi, shift);
+ #define shl128_pair(out,hi,lo,shift) out = __shiftleft128(lo, hi, shift);
+ #define shr128(out,in,shift) shr128_pair(out, in.hi, in.lo, shift)
+ #define shl128(out,in,shift) shl128_pair(out, in.hi, in.lo, shift)
+ #define add128(a,b) { uint64_t p = a.lo; a.lo += b.lo; a.hi += b.hi + (a.lo < p); }
+ #define add128_64(a,b) { uint64_t p = a.lo; a.lo += b; a.hi += (a.lo < p); }
+ #define lo128(a) (a.lo)
+ #define hi128(a) (a.hi)
+ #elif defined(COMPILER_GCC) && !defined(HAVE_NATIVE_UINT128)
+ #if defined(__SIZEOF_INT128__)
+ #define HAVE_NATIVE_UINT128
+ typedef unsigned __int128 uint128_t;
+ #elif (COMPILER_GCC >= 40400)
+ #define HAVE_NATIVE_UINT128
+ typedef unsigned uint128_t __attribute__((mode(TI)));
+ #elif defined(CPU_X86_64)
+ #define HAVE_UINT128
+ typedef struct uint128_t {
+ uint64_t lo, hi;
+ } uint128_t;
+ #define mul64x64_128(out,a,b) __asm__ ("mulq %3" : "=a" (out.lo), "=d" (out.hi) : "a" (a), "rm" (b));
+ #define shr128_pair(out,hi,lo,shift) __asm__ ("shrdq %2,%1,%0" : "+r" (lo) : "r" (hi), "J" (shift)); out = lo;
+ #define shl128_pair(out,hi,lo,shift) __asm__ ("shldq %2,%1,%0" : "+r" (hi) : "r" (lo), "J" (shift)); out = hi;
+ #define shr128(out,in,shift) shr128_pair(out,in.hi, in.lo, shift)
+ #define shl128(out,in,shift) shl128_pair(out,in.hi, in.lo, shift)
+ #define add128(a,b) __asm__ ("addq %4,%2; adcq %5,%3" : "=r" (a.hi), "=r" (a.lo) : "1" (a.lo), "0" (a.hi), "rm" (b.lo), "rm" (b.hi) : "cc");
+ #define add128_64(a,b) __asm__ ("addq %4,%2; adcq $0,%3" : "=r" (a.hi), "=r" (a.lo) : "1" (a.lo), "0" (a.hi), "rm" (b) : "cc");
+ #define lo128(a) (a.lo)
+ #define hi128(a) (a.hi)
+ #endif
+ #endif
+
+ #if defined(HAVE_NATIVE_UINT128)
+ #define HAVE_UINT128
+ #define mul64x64_128(out,a,b) out = (uint128_t)a * b;
+ #define shr128_pair(out,hi,lo,shift) out = (uint64_t)((((uint128_t)hi << 64) | lo) >> (shift));
+ #define shl128_pair(out,hi,lo,shift) out = (uint64_t)(((((uint128_t)hi << 64) | lo) << (shift)) >> 64);
+ #define shr128(out,in,shift) out = (uint64_t)(in >> (shift));
+ #define shl128(out,in,shift) out = (uint64_t)((in << shift) >> 64);
+ #define add128(a,b) a += b;
+ #define add128_64(a,b) a += (uint64_t)b;
+ #define lo128(a) ((uint64_t)a)
+ #define hi128(a) ((uint64_t)(a >> 64))
+ #endif
+
+ #if !defined(HAVE_UINT128)
+ #error Need a uint128_t implementation!
+ #endif
+#endif
+
+/* endian */
+#if !defined(ED25519_OPENSSLRNG)
+static inline void U32TO8_LE(unsigned char *p, const uint32_t v) {
+ p[0] = (unsigned char)(v );
+ p[1] = (unsigned char)(v >> 8);
+ p[2] = (unsigned char)(v >> 16);
+ p[3] = (unsigned char)(v >> 24);
+}
+#endif
+
+#if !defined(HAVE_UINT128)
+static inline uint32_t U8TO32_LE(const unsigned char *p) {
+ return
+ (((uint32_t)(p[0]) ) |
+ ((uint32_t)(p[1]) << 8) |
+ ((uint32_t)(p[2]) << 16) |
+ ((uint32_t)(p[3]) << 24));
+}
+#else
+static inline uint64_t U8TO64_LE(const unsigned char *p) {
+ return
+ (((uint64_t)(p[0]) ) |
+ ((uint64_t)(p[1]) << 8) |
+ ((uint64_t)(p[2]) << 16) |
+ ((uint64_t)(p[3]) << 24) |
+ ((uint64_t)(p[4]) << 32) |
+ ((uint64_t)(p[5]) << 40) |
+ ((uint64_t)(p[6]) << 48) |
+ ((uint64_t)(p[7]) << 56));
+}
+
+static inline void U64TO8_LE(unsigned char *p, const uint64_t v) {
+ p[0] = (unsigned char)(v );
+ p[1] = (unsigned char)(v >> 8);
+ p[2] = (unsigned char)(v >> 16);
+ p[3] = (unsigned char)(v >> 24);
+ p[4] = (unsigned char)(v >> 32);
+ p[5] = (unsigned char)(v >> 40);
+ p[6] = (unsigned char)(v >> 48);
+ p[7] = (unsigned char)(v >> 56);
+}
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+
+
diff --git a/src/ext/ed25519/donna/ed25519-donna.h b/src/ext/ed25519/donna/ed25519-donna.h
new file mode 100644
index 0000000000..de1120f46f
--- /dev/null
+++ b/src/ext/ed25519/donna/ed25519-donna.h
@@ -0,0 +1,115 @@
+/*
+ Public domain by Andrew M.
+ Modified from the amd64-51-30k implementation by
+ Daniel J. Bernstein
+ Niels Duif
+ Tanja Lange
+ Peter Schwabe
+ Bo-Yin Yang
+*/
+
+
+#include "ed25519-donna-portable.h"
+
+#if defined(ED25519_SSE2)
+#else
+ #if defined(HAVE_UINT128) && !defined(ED25519_FORCE_32BIT)
+ #define ED25519_64BIT
+ #else
+ #define ED25519_32BIT
+ #endif
+#endif
+
+#if !defined(ED25519_NO_INLINE_ASM)
+ /* detect extra features first so un-needed functions can be disabled throughout */
+ #if defined(ED25519_SSE2)
+ #if defined(COMPILER_GCC) && defined(CPU_X86)
+ #define ED25519_GCC_32BIT_SSE_CHOOSE
+ #elif defined(COMPILER_GCC) && defined(CPU_X86_64)
+ #define ED25519_GCC_64BIT_SSE_CHOOSE
+ #endif
+ #else
+ #if defined(CPU_X86_64)
+ #if defined(COMPILER_GCC)
+ #if defined(ED25519_64BIT)
+ #define ED25519_GCC_64BIT_X86_CHOOSE
+ #else
+ #define ED25519_GCC_64BIT_32BIT_CHOOSE
+ #endif
+ #endif
+ #endif
+ #endif
+#endif
+
+#if defined(ED25519_SSE2)
+ #include "curve25519-donna-sse2.h"
+#elif defined(ED25519_64BIT)
+ #include "curve25519-donna-64bit.h"
+#else
+ #include "curve25519-donna-32bit.h"
+#endif
+
+#include "curve25519-donna-helpers.h"
+
+/* separate uint128 check for 64 bit sse2 */
+#if defined(HAVE_UINT128) && !defined(ED25519_FORCE_32BIT)
+ #include "modm-donna-64bit.h"
+#else
+ #include "modm-donna-32bit.h"
+#endif
+
+typedef unsigned char hash_512bits[64];
+
+/*
+ Timing safe memory compare
+*/
+static int
+ed25519_verify(const unsigned char *x, const unsigned char *y, size_t len) {
+ size_t differentbits = 0;
+ while (len--)
+ differentbits |= (*x++ ^ *y++);
+ return (int) (1 & ((differentbits - 1) >> 8));
+}
+
+
+/*
+ * Arithmetic on the twisted Edwards curve -x^2 + y^2 = 1 + dx^2y^2
+ * with d = -(121665/121666) = 37095705934669439343138083508754565189542113879843219016388785533085940283555
+ * Base point: (15112221349535400772501151409588531511454012693041857206046113283949847762202,46316835694926478169428394003475163141307993866256225615783033603165251855960);
+ */
+
+typedef struct ge25519_t {
+ bignum25519 x, y, z, t;
+} ge25519;
+
+typedef struct ge25519_p1p1_t {
+ bignum25519 x, y, z, t;
+} ge25519_p1p1;
+
+typedef struct ge25519_niels_t {
+ bignum25519 ysubx, xaddy, t2d;
+} ge25519_niels;
+
+typedef struct ge25519_pniels_t {
+ bignum25519 ysubx, xaddy, z, t2d;
+} ge25519_pniels;
+
+#include "ed25519-donna-basepoint-table.h"
+
+#if defined(ED25519_64BIT)
+ #include "ed25519-donna-64bit-tables.h"
+ #include "ed25519-donna-64bit-x86.h"
+#else
+ #include "ed25519-donna-32bit-tables.h"
+ #include "ed25519-donna-64bit-x86-32bit.h"
+#endif
+
+
+#if defined(ED25519_SSE2)
+ #include "ed25519-donna-32bit-sse2.h"
+ #include "ed25519-donna-64bit-sse2.h"
+ #include "ed25519-donna-impl-sse2.h"
+#else
+ #include "ed25519-donna-impl-base.h"
+#endif
+
diff --git a/src/ext/ed25519/donna/ed25519-hash-custom.h b/src/ext/ed25519/donna/ed25519-hash-custom.h
new file mode 100644
index 0000000000..7dc249129d
--- /dev/null
+++ b/src/ext/ed25519/donna/ed25519-hash-custom.h
@@ -0,0 +1,11 @@
+/*
+ a custom hash must have a 512bit digest and implement:
+
+ struct ed25519_hash_context;
+
+ void ed25519_hash_init(ed25519_hash_context *ctx);
+ void ed25519_hash_update(ed25519_hash_context *ctx, const uint8_t *in, size_t inlen);
+ void ed25519_hash_final(ed25519_hash_context *ctx, uint8_t *hash);
+ void ed25519_hash(uint8_t *hash, const uint8_t *in, size_t inlen);
+*/
+
diff --git a/src/ext/ed25519/donna/ed25519-hash.h b/src/ext/ed25519/donna/ed25519-hash.h
new file mode 100644
index 0000000000..6ba8f52383
--- /dev/null
+++ b/src/ext/ed25519/donna/ed25519-hash.h
@@ -0,0 +1,219 @@
+#if defined(ED25519_REFHASH)
+
+/* reference/slow SHA-512. really, do not use this */
+
+#define HASH_BLOCK_SIZE 128
+#define HASH_DIGEST_SIZE 64
+
+typedef struct sha512_state_t {
+ uint64_t H[8];
+ uint64_t T[2];
+ uint32_t leftover;
+ uint8_t buffer[HASH_BLOCK_SIZE];
+} sha512_state;
+
+typedef sha512_state ed25519_hash_context;
+
+static const uint64_t sha512_constants[80] = {
+ 0x428a2f98d728ae22ull, 0x7137449123ef65cdull, 0xb5c0fbcfec4d3b2full, 0xe9b5dba58189dbbcull,
+ 0x3956c25bf348b538ull, 0x59f111f1b605d019ull, 0x923f82a4af194f9bull, 0xab1c5ed5da6d8118ull,
+ 0xd807aa98a3030242ull, 0x12835b0145706fbeull, 0x243185be4ee4b28cull, 0x550c7dc3d5ffb4e2ull,
+ 0x72be5d74f27b896full, 0x80deb1fe3b1696b1ull, 0x9bdc06a725c71235ull, 0xc19bf174cf692694ull,
+ 0xe49b69c19ef14ad2ull, 0xefbe4786384f25e3ull, 0x0fc19dc68b8cd5b5ull, 0x240ca1cc77ac9c65ull,
+ 0x2de92c6f592b0275ull, 0x4a7484aa6ea6e483ull, 0x5cb0a9dcbd41fbd4ull, 0x76f988da831153b5ull,
+ 0x983e5152ee66dfabull, 0xa831c66d2db43210ull, 0xb00327c898fb213full, 0xbf597fc7beef0ee4ull,
+ 0xc6e00bf33da88fc2ull, 0xd5a79147930aa725ull, 0x06ca6351e003826full, 0x142929670a0e6e70ull,
+ 0x27b70a8546d22ffcull, 0x2e1b21385c26c926ull, 0x4d2c6dfc5ac42aedull, 0x53380d139d95b3dfull,
+ 0x650a73548baf63deull, 0x766a0abb3c77b2a8ull, 0x81c2c92e47edaee6ull, 0x92722c851482353bull,
+ 0xa2bfe8a14cf10364ull, 0xa81a664bbc423001ull, 0xc24b8b70d0f89791ull, 0xc76c51a30654be30ull,
+ 0xd192e819d6ef5218ull, 0xd69906245565a910ull, 0xf40e35855771202aull, 0x106aa07032bbd1b8ull,
+ 0x19a4c116b8d2d0c8ull, 0x1e376c085141ab53ull, 0x2748774cdf8eeb99ull, 0x34b0bcb5e19b48a8ull,
+ 0x391c0cb3c5c95a63ull, 0x4ed8aa4ae3418acbull, 0x5b9cca4f7763e373ull, 0x682e6ff3d6b2b8a3ull,
+ 0x748f82ee5defb2fcull, 0x78a5636f43172f60ull, 0x84c87814a1f0ab72ull, 0x8cc702081a6439ecull,
+ 0x90befffa23631e28ull, 0xa4506cebde82bde9ull, 0xbef9a3f7b2c67915ull, 0xc67178f2e372532bull,
+ 0xca273eceea26619cull, 0xd186b8c721c0c207ull, 0xeada7dd6cde0eb1eull, 0xf57d4f7fee6ed178ull,
+ 0x06f067aa72176fbaull, 0x0a637dc5a2c898a6ull, 0x113f9804bef90daeull, 0x1b710b35131c471bull,
+ 0x28db77f523047d84ull, 0x32caab7b40c72493ull, 0x3c9ebe0a15c9bebcull, 0x431d67c49c100d4cull,
+ 0x4cc5d4becb3e42b6ull, 0x597f299cfc657e2aull, 0x5fcb6fab3ad6faecull, 0x6c44198c4a475817ull
+};
+
+static uint64_t
+sha512_ROTR64(uint64_t x, int k) {
+ return (x >> k) | (x << (64 - k));
+}
+
+static uint64_t
+sha512_LOAD64_BE(const uint8_t *p) {
+ return
+ ((uint64_t)p[0] << 56) |
+ ((uint64_t)p[1] << 48) |
+ ((uint64_t)p[2] << 40) |
+ ((uint64_t)p[3] << 32) |
+ ((uint64_t)p[4] << 24) |
+ ((uint64_t)p[5] << 16) |
+ ((uint64_t)p[6] << 8) |
+ ((uint64_t)p[7] );
+}
+
+static void
+sha512_STORE64_BE(uint8_t *p, uint64_t v) {
+ p[0] = (uint8_t)(v >> 56);
+ p[1] = (uint8_t)(v >> 48);
+ p[2] = (uint8_t)(v >> 40);
+ p[3] = (uint8_t)(v >> 32);
+ p[4] = (uint8_t)(v >> 24);
+ p[5] = (uint8_t)(v >> 16);
+ p[6] = (uint8_t)(v >> 8);
+ p[7] = (uint8_t)(v );
+}
+
+#define Ch(x,y,z) (z ^ (x & (y ^ z)))
+#define Maj(x,y,z) (((x | y) & z) | (x & y))
+#define S0(x) (sha512_ROTR64(x, 28) ^ sha512_ROTR64(x, 34) ^ sha512_ROTR64(x, 39))
+#define S1(x) (sha512_ROTR64(x, 14) ^ sha512_ROTR64(x, 18) ^ sha512_ROTR64(x, 41))
+#define G0(x) (sha512_ROTR64(x, 1) ^ sha512_ROTR64(x, 8) ^ (x >> 7))
+#define G1(x) (sha512_ROTR64(x, 19) ^ sha512_ROTR64(x, 61) ^ (x >> 6))
+#define W0(in,i) (sha512_LOAD64_BE(&in[i * 8]))
+#define W1(i) (G1(w[i - 2]) + w[i - 7] + G0(w[i - 15]) + w[i - 16])
+#define STEP(i) \
+ t1 = S0(r[0]) + Maj(r[0], r[1], r[2]); \
+ t0 = r[7] + S1(r[4]) + Ch(r[4], r[5], r[6]) + sha512_constants[i] + w[i]; \
+ r[7] = r[6]; \
+ r[6] = r[5]; \
+ r[5] = r[4]; \
+ r[4] = r[3] + t0; \
+ r[3] = r[2]; \
+ r[2] = r[1]; \
+ r[1] = r[0]; \
+ r[0] = t0 + t1;
+
+static void
+sha512_blocks(sha512_state *S, const uint8_t *in, size_t blocks) {
+ uint64_t r[8], w[80], t0, t1;
+ size_t i;
+
+ for (i = 0; i < 8; i++) r[i] = S->H[i];
+
+ while (blocks--) {
+ for (i = 0; i < 16; i++) { w[i] = W0(in, i); }
+ for (i = 16; i < 80; i++) { w[i] = W1(i); }
+ for (i = 0; i < 80; i++) { STEP(i); }
+ for (i = 0; i < 8; i++) { r[i] += S->H[i]; S->H[i] = r[i]; }
+ S->T[0] += HASH_BLOCK_SIZE * 8;
+ S->T[1] += (!S->T[0]) ? 1 : 0;
+ in += HASH_BLOCK_SIZE;
+ }
+}
+
+static void
+ed25519_hash_init(sha512_state *S) {
+ S->H[0] = 0x6a09e667f3bcc908ull;
+ S->H[1] = 0xbb67ae8584caa73bull;
+ S->H[2] = 0x3c6ef372fe94f82bull;
+ S->H[3] = 0xa54ff53a5f1d36f1ull;
+ S->H[4] = 0x510e527fade682d1ull;
+ S->H[5] = 0x9b05688c2b3e6c1full;
+ S->H[6] = 0x1f83d9abfb41bd6bull;
+ S->H[7] = 0x5be0cd19137e2179ull;
+ S->T[0] = 0;
+ S->T[1] = 0;
+ S->leftover = 0;
+}
+
+static void
+ed25519_hash_update(sha512_state *S, const uint8_t *in, size_t inlen) {
+ size_t blocks, want;
+
+ /* handle the previous data */
+ if (S->leftover) {
+ want = (HASH_BLOCK_SIZE - S->leftover);
+ want = (want < inlen) ? want : inlen;
+ memcpy(S->buffer + S->leftover, in, want);
+ S->leftover += (uint32_t)want;
+ if (S->leftover < HASH_BLOCK_SIZE)
+ return;
+ in += want;
+ inlen -= want;
+ sha512_blocks(S, S->buffer, 1);
+ }
+
+ /* handle the current data */
+ blocks = (inlen & ~(HASH_BLOCK_SIZE - 1));
+ S->leftover = (uint32_t)(inlen - blocks);
+ if (blocks) {
+ sha512_blocks(S, in, blocks / HASH_BLOCK_SIZE);
+ in += blocks;
+ }
+
+ /* handle leftover data */
+ if (S->leftover)
+ memcpy(S->buffer, in, S->leftover);
+}
+
+static void
+ed25519_hash_final(sha512_state *S, uint8_t *hash) {
+ uint64_t t0 = S->T[0] + (S->leftover * 8), t1 = S->T[1];
+
+ S->buffer[S->leftover] = 0x80;
+ if (S->leftover <= 111) {
+ memset(S->buffer + S->leftover + 1, 0, 111 - S->leftover);
+ } else {
+ memset(S->buffer + S->leftover + 1, 0, 127 - S->leftover);
+ sha512_blocks(S, S->buffer, 1);
+ memset(S->buffer, 0, 112);
+ }
+
+ sha512_STORE64_BE(S->buffer + 112, t1);
+ sha512_STORE64_BE(S->buffer + 120, t0);
+ sha512_blocks(S, S->buffer, 1);
+
+ sha512_STORE64_BE(&hash[ 0], S->H[0]);
+ sha512_STORE64_BE(&hash[ 8], S->H[1]);
+ sha512_STORE64_BE(&hash[16], S->H[2]);
+ sha512_STORE64_BE(&hash[24], S->H[3]);
+ sha512_STORE64_BE(&hash[32], S->H[4]);
+ sha512_STORE64_BE(&hash[40], S->H[5]);
+ sha512_STORE64_BE(&hash[48], S->H[6]);
+ sha512_STORE64_BE(&hash[56], S->H[7]);
+}
+
+static void
+ed25519_hash(uint8_t *hash, const uint8_t *in, size_t inlen) {
+ ed25519_hash_context ctx;
+ ed25519_hash_init(&ctx);
+ ed25519_hash_update(&ctx, in, inlen);
+ ed25519_hash_final(&ctx, hash);
+}
+
+#elif defined(ED25519_CUSTOMHASH)
+
+#include "ed25519-hash-custom.h"
+
+#else
+
+#include <openssl/sha.h>
+
+typedef SHA512_CTX ed25519_hash_context;
+
+static void
+ed25519_hash_init(ed25519_hash_context *ctx) {
+ SHA512_Init(ctx);
+}
+
+static void
+ed25519_hash_update(ed25519_hash_context *ctx, const uint8_t *in, size_t inlen) {
+ SHA512_Update(ctx, in, inlen);
+}
+
+static void
+ed25519_hash_final(ed25519_hash_context *ctx, uint8_t *hash) {
+ SHA512_Final(hash, ctx);
+}
+
+static void
+ed25519_hash(uint8_t *hash, const uint8_t *in, size_t inlen) {
+ SHA512(in, inlen, hash);
+}
+
+#endif
+
diff --git a/src/ext/ed25519/donna/ed25519-randombytes-custom.h b/src/ext/ed25519/donna/ed25519-randombytes-custom.h
new file mode 100644
index 0000000000..9f5106340c
--- /dev/null
+++ b/src/ext/ed25519/donna/ed25519-randombytes-custom.h
@@ -0,0 +1,8 @@
+/*
+ a custom randombytes must implement:
+
+ void ED25519_FN(ed25519_randombytes_unsafe) (void *p, size_t len);
+
+ ed25519_randombytes_unsafe is used by the batch verification function
+ to create random scalars
+*/
diff --git a/src/ext/ed25519/donna/ed25519-randombytes.h b/src/ext/ed25519/donna/ed25519-randombytes.h
new file mode 100644
index 0000000000..1dc629028e
--- /dev/null
+++ b/src/ext/ed25519/donna/ed25519-randombytes.h
@@ -0,0 +1,91 @@
+#if defined(ED25519_TEST)
+/*
+ ISAAC+ "variant", the paper is not clear on operator precedence and other
+ things. This is the "first in, first out" option!
+
+ Not threadsafe or securely initialized, only for deterministic testing
+*/
+typedef struct isaacp_state_t {
+ uint32_t state[256];
+ unsigned char buffer[1024];
+ uint32_t a, b, c;
+ size_t left;
+} isaacp_state;
+
+#define isaacp_step(offset, mix) \
+ x = mm[i + offset]; \
+ a = (a ^ (mix)) + (mm[(i + offset + 128) & 0xff]); \
+ y = (a ^ b) + mm[(x >> 2) & 0xff]; \
+ mm[i + offset] = y; \
+ b = (x + a) ^ mm[(y >> 10) & 0xff]; \
+ U32TO8_LE(out + (i + offset) * 4, b);
+
+static void
+isaacp_mix(isaacp_state *st) {
+ uint32_t i, x, y;
+ uint32_t a = st->a, b = st->b, c = st->c;
+ uint32_t *mm = st->state;
+ unsigned char *out = st->buffer;
+
+ c = c + 1;
+ b = b + c;
+
+ for (i = 0; i < 256; i += 4) {
+ isaacp_step(0, ROTL32(a,13))
+ isaacp_step(1, ROTR32(a, 6))
+ isaacp_step(2, ROTL32(a, 2))
+ isaacp_step(3, ROTR32(a,16))
+ }
+
+ st->a = a;
+ st->b = b;
+ st->c = c;
+ st->left = 1024;
+}
+
+static void
+isaacp_random(isaacp_state *st, void *p, size_t len) {
+ size_t use;
+ unsigned char *c = (unsigned char *)p;
+ while (len) {
+ use = (len > st->left) ? st->left : len;
+ memcpy(c, st->buffer + (sizeof(st->buffer) - st->left), use);
+
+ st->left -= use;
+ c += use;
+ len -= use;
+
+ if (!st->left)
+ isaacp_mix(st);
+ }
+}
+
+void
+ED25519_FN(ed25519_randombytes_unsafe) (void *p, size_t len) {
+ static int initialized = 0;
+ static isaacp_state rng;
+
+ if (!initialized) {
+ memset(&rng, 0, sizeof(rng));
+ isaacp_mix(&rng);
+ isaacp_mix(&rng);
+ initialized = 1;
+ }
+
+ isaacp_random(&rng, p, len);
+}
+#elif defined(ED25519_CUSTOMRANDOM)
+
+#include "ed25519-randombytes-custom.h"
+
+#else
+
+#include <openssl/rand.h>
+
+void
+ED25519_FN(ed25519_randombytes_unsafe) (void *p, size_t len) {
+
+ RAND_bytes(p, (int) len);
+
+}
+#endif
diff --git a/src/ext/ed25519/donna/ed25519.c b/src/ext/ed25519/donna/ed25519.c
new file mode 100644
index 0000000000..58a755b8d3
--- /dev/null
+++ b/src/ext/ed25519/donna/ed25519.c
@@ -0,0 +1,150 @@
+/*
+ Public domain by Andrew M.
+
+ Ed25519 reference implementation using Ed25519-donna
+*/
+
+
+/* define ED25519_SUFFIX to have it appended to the end of each public function */
+#if !defined(ED25519_SUFFIX)
+#define ED25519_SUFFIX
+#endif
+
+#define ED25519_FN3(fn,suffix) fn##suffix
+#define ED25519_FN2(fn,suffix) ED25519_FN3(fn,suffix)
+#define ED25519_FN(fn) ED25519_FN2(fn,ED25519_SUFFIX)
+
+#include "ed25519-donna.h"
+#include "ed25519.h"
+#include "ed25519-randombytes.h"
+#include "ed25519-hash.h"
+
+/*
+ Generates a (extsk[0..31]) and aExt (extsk[32..63])
+*/
+
+DONNA_INLINE static void
+ed25519_extsk(hash_512bits extsk, const ed25519_secret_key sk) {
+ ed25519_hash(extsk, sk, 32);
+ extsk[0] &= 248;
+ extsk[31] &= 127;
+ extsk[31] |= 64;
+}
+
+static void
+ed25519_hram(hash_512bits hram, const ed25519_signature RS, const ed25519_public_key pk, const unsigned char *m, size_t mlen) {
+ ed25519_hash_context ctx;
+ ed25519_hash_init(&ctx);
+ ed25519_hash_update(&ctx, RS, 32);
+ ed25519_hash_update(&ctx, pk, 32);
+ ed25519_hash_update(&ctx, m, mlen);
+ ed25519_hash_final(&ctx, hram);
+}
+
+void
+ED25519_FN(ed25519_publickey) (const ed25519_secret_key sk, ed25519_public_key pk) {
+ bignum256modm a;
+ ge25519 ALIGN(16) A;
+ hash_512bits extsk;
+
+ /* A = aB */
+ ed25519_extsk(extsk, sk);
+ expand256_modm(a, extsk, 32);
+ ge25519_scalarmult_base_niels(&A, ge25519_niels_base_multiples, a);
+ ge25519_pack(pk, &A);
+}
+
+
+void
+ED25519_FN(ed25519_sign) (const unsigned char *m, size_t mlen, const ed25519_secret_key sk, const ed25519_public_key pk, ed25519_signature RS) {
+ ed25519_hash_context ctx;
+ bignum256modm r, S, a;
+ ge25519 ALIGN(16) R;
+ hash_512bits extsk, hashr, hram;
+
+ ed25519_extsk(extsk, sk);
+
+ /* r = H(aExt[32..64], m) */
+ ed25519_hash_init(&ctx);
+ ed25519_hash_update(&ctx, extsk + 32, 32);
+ ed25519_hash_update(&ctx, m, mlen);
+ ed25519_hash_final(&ctx, hashr);
+ expand256_modm(r, hashr, 64);
+
+ /* R = rB */
+ ge25519_scalarmult_base_niels(&R, ge25519_niels_base_multiples, r);
+ ge25519_pack(RS, &R);
+
+ /* S = H(R,A,m).. */
+ ed25519_hram(hram, RS, pk, m, mlen);
+ expand256_modm(S, hram, 64);
+
+ /* S = H(R,A,m)a */
+ expand256_modm(a, extsk, 32);
+ mul256_modm(S, S, a);
+
+ /* S = (r + H(R,A,m)a) */
+ add256_modm(S, S, r);
+
+ /* S = (r + H(R,A,m)a) mod L */
+ contract256_modm(RS + 32, S);
+}
+
+int
+ED25519_FN(ed25519_sign_open) (const unsigned char *m, size_t mlen, const ed25519_public_key pk, const ed25519_signature RS) {
+ ge25519 ALIGN(16) R, A;
+ hash_512bits hash;
+ bignum256modm hram, S;
+ unsigned char checkR[32];
+
+ if ((RS[63] & 224) || !ge25519_unpack_negative_vartime(&A, pk))
+ return -1;
+
+ /* hram = H(R,A,m) */
+ ed25519_hram(hash, RS, pk, m, mlen);
+ expand256_modm(hram, hash, 64);
+
+ /* S */
+ expand256_modm(S, RS + 32, 32);
+
+ /* SB - H(R,A,m)A */
+ ge25519_double_scalarmult_vartime(&R, &A, hram, S);
+ ge25519_pack(checkR, &R);
+
+ /* check that R = SB - H(R,A,m)A */
+ return ed25519_verify(RS, checkR, 32) ? 0 : -1;
+}
+
+#include "ed25519-donna-batchverify.h"
+
+/*
+ Fast Curve25519 basepoint scalar multiplication
+*/
+
+void
+ED25519_FN(curved25519_scalarmult_basepoint) (curved25519_key pk, const curved25519_key e) {
+ curved25519_key ec;
+ bignum256modm s;
+ bignum25519 ALIGN(16) yplusz, zminusy;
+ ge25519 ALIGN(16) p;
+ size_t i;
+
+ /* clamp */
+ for (i = 0; i < 32; i++) ec[i] = e[i];
+ ec[0] &= 248;
+ ec[31] &= 127;
+ ec[31] |= 64;
+
+ expand_raw256_modm(s, ec);
+
+ /* scalar * basepoint */
+ ge25519_scalarmult_base_niels(&p, ge25519_niels_base_multiples, s);
+
+ /* u = (y + z) / (z - y) */
+ curve25519_add(yplusz, p.y, p.z);
+ curve25519_sub(zminusy, p.z, p.y);
+ curve25519_recip(zminusy, zminusy);
+ curve25519_mul(yplusz, yplusz, zminusy);
+ curve25519_contract(pk, yplusz);
+}
+
diff --git a/src/ext/ed25519/donna/ed25519.h b/src/ext/ed25519/donna/ed25519.h
new file mode 100644
index 0000000000..dc86675cd1
--- /dev/null
+++ b/src/ext/ed25519/donna/ed25519.h
@@ -0,0 +1,30 @@
+#ifndef ED25519_H
+#define ED25519_H
+
+#include <stdlib.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+typedef unsigned char ed25519_signature[64];
+typedef unsigned char ed25519_public_key[32];
+typedef unsigned char ed25519_secret_key[32];
+
+typedef unsigned char curved25519_key[32];
+
+void ed25519_publickey(const ed25519_secret_key sk, ed25519_public_key pk);
+int ed25519_sign_open(const unsigned char *m, size_t mlen, const ed25519_public_key pk, const ed25519_signature RS);
+void ed25519_sign(const unsigned char *m, size_t mlen, const ed25519_secret_key sk, const ed25519_public_key pk, ed25519_signature RS);
+
+int ed25519_sign_open_batch(const unsigned char **m, size_t *mlen, const unsigned char **pk, const unsigned char **RS, size_t num, int *valid);
+
+void ed25519_randombytes_unsafe(void *out, size_t count);
+
+void curved25519_scalarmult_basepoint(curved25519_key pk, const curved25519_key e);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif // ED25519_H
diff --git a/src/ext/ed25519/donna/fuzz/README.md b/src/ext/ed25519/donna/fuzz/README.md
new file mode 100644
index 0000000000..306ddfe08c
--- /dev/null
+++ b/src/ext/ed25519/donna/fuzz/README.md
@@ -0,0 +1,173 @@
+This code fuzzes ed25519-donna (and optionally ed25519-donna-sse2) against the ref10 implementations of
+[curve25519](https://github.com/floodyberry/supercop/tree/master/crypto_scalarmult/curve25519/ref10) and
+[ed25519](https://github.com/floodyberry/supercop/tree/master/crypto_sign/ed25519/ref10).
+
+Curve25519 tests that generating a public key from a secret key matches the reference implementation's result.
+
+# Building
+
+## *nix + PHP
+
+`php build-nix.php (required parameters) (optional parameters)`
+
+Required parameters:
+
+* `--function=[curve25519,ed25519]`
+* `--bits=[32,64]`
+
+Optional parameters:
+
+* `--with-sse2`
+
+ Also fuzz against ed25519-donna-sse2
+* `--with-openssl`
+
+ Build with OpenSSL's SHA-512.
+
+ Default: Reference SHA-512 implementation (slow!)
+
+* `--compiler=[gcc,clang,icc]`
+
+ Default: gcc
+
+* `--no-asm`
+
+ Do not use platform specific assembler
+
+
+example:
+
+ php build-nix.php --bits=64 --function=ed25519 --with-sse2 --compiler=icc
+
+## Windows
+
+Create a project with access to the ed25519 files.
+
+If you are not using OpenSSL, add the `ED25519_REFHASH` define to the project's
+"Properties/Preprocessor/Preprocessor Definitions" option.
+
+Add the following files to the project:
+
+* `fuzz/curve25519-ref10.c`
+* `fuzz/ed25519-ref10.c`
+* `fuzz/ed25519-donna.c`
+* `fuzz/ed25519-donna-sse2.c` (optional)
+* `fuzz-[curve25519/ed25519].c` (depending on which you want to fuzz)
+
+If you are also fuzzing against ed25519-donna-sse2, add the `ED25519_SSE2` define for `fuzz-[curve25519/ed25519].c` under
+its "Properties/Preprocessor/Preprocessor Definitions" option.
+
+# Running
+
+If everything agrees, the program will only output occasional status dots (every 0x1000 passes)
+and a 64bit progress count (every 0x20000 passes):
+
+ fuzzing: ref10 curved25519 curved25519-sse2
+
+ ................................ [0000000000020000]
+ ................................ [0000000000040000]
+ ................................ [0000000000060000]
+ ................................ [0000000000080000]
+ ................................ [00000000000a0000]
+ ................................ [00000000000c0000]
+
+If any of the implementations do not agree with the ref10 implementation, the program will dump
+the random data that was used, the data generated by the ref10 implementation, and diffs of the
+ed25519-donna data against the ref10 data.
+
+## Example errors
+
+These are example error dumps (with intentionally introduced errors).
+
+### Ed25519
+
+Random data:
+
+* sk, or Secret Key
+* m, or Message
+
+Generated data:
+
+* pk, or Public Key
+* sig, or Signature
+* valid, or if the signature of the message is valid with the public key
+
+Dump:
+
+ sk:
+ 0x3b,0xb7,0x17,0x7a,0x66,0xdc,0xb7,0x9a,0x90,0x25,0x07,0x99,0x96,0xf3,0x92,0xef,
+ 0x78,0xf8,0xad,0x6c,0x35,0x87,0x81,0x67,0x03,0xe6,0x95,0xba,0x06,0x18,0x7c,0x9c,
+
+ m:
+ 0x7c,0x8d,0x3d,0xe1,0x92,0xee,0x7a,0xb8,0x4d,0xc9,0xfb,0x02,0x34,0x1e,0x5a,0x91,
+ 0xee,0x01,0xa6,0xb8,0xab,0x37,0x3f,0x3d,0x6d,0xa2,0x47,0xe3,0x27,0x93,0x7c,0xb7,
+ 0x77,0x07,0xb6,0x88,0x41,0x22,0xf3,0x3f,0xce,0xcb,0x6b,0x3e,0x2b,0x23,0x68,0x7f,
+ 0x5b,0xb9,0xda,0x04,0xbb,0xae,0x42,0x50,0xf5,0xe9,0xc5,0x11,0xbd,0x52,0x76,0x98,
+ 0xf1,0x87,0x09,0xb9,0x89,0x0a,0x52,0x69,0x01,0xce,0xe0,0x4a,0xa6,0x46,0x5a,0xe1,
+ 0x63,0x14,0xe0,0x81,0x52,0xec,0xcd,0xcf,0x70,0x54,0x7d,0xa3,0x49,0x8b,0xf0,0x89,
+ 0x70,0x07,0x12,0x2a,0xd9,0xaa,0x16,0x01,0xb2,0x16,0x3a,0xbb,0xfc,0xfa,0x13,0x5b,
+ 0x69,0x83,0x92,0x70,0x95,0x76,0xa0,0x8e,0x16,0x79,0xcc,0xaa,0xb5,0x7c,0xf8,0x7a,
+
+ ref10:
+ pk:
+ 0x71,0xb0,0x5e,0x62,0x1b,0xe3,0xe7,0x36,0x91,0x8b,0xc0,0x13,0x36,0x0c,0xc9,0x04,
+ 0x16,0xf5,0xff,0x48,0x0c,0x83,0x6b,0x88,0x53,0xa2,0xc6,0x0f,0xf7,0xac,0x42,0x04,
+
+ sig:
+ 0x3e,0x05,0xc5,0x37,0x16,0x0b,0x29,0x30,0x89,0xa3,0xe7,0x83,0x08,0x16,0xdd,0x96,
+ 0x02,0xfa,0x0d,0x44,0x2c,0x43,0xaa,0x80,0x93,0x04,0x58,0x22,0x09,0xbf,0x11,0xa5,
+ 0xcc,0xa5,0x3c,0x9f,0xa0,0xa4,0x64,0x5a,0x4a,0xdb,0x20,0xfb,0xc7,0x9b,0xfd,0x3f,
+ 0x08,0xae,0xc4,0x3c,0x1e,0xd8,0xb6,0xb4,0xd2,0x6d,0x80,0x92,0xcb,0x71,0xf3,0x02,
+
+ valid: yes
+
+ ed25519-donna:
+ pk diff:
+ ____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,
+ ____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,
+
+ sig diff:
+ 0x2c,0xb9,0x25,0x14,0xd0,0x94,0xeb,0xfe,0x46,0x02,0xc2,0xe8,0xa3,0xeb,0xbf,0xb5,
+ 0x72,0x84,0xbf,0xc1,0x8a,0x32,0x30,0x99,0xf7,0x58,0xfe,0x06,0xa8,0xdc,0xdc,0xab,
+ 0xb5,0x57,0x03,0x33,0x87,0xce,0x54,0x55,0x6a,0x69,0x8a,0xc4,0xb7,0x2a,0xed,0x97,
+ 0xb4,0x68,0xe7,0x52,0x7a,0x07,0x55,0x3b,0xa2,0x94,0xd6,0x5e,0xa1,0x61,0x80,0x08,
+
+ valid: no
+
+In this case, the generated public key matches, but the generated signature is completely
+different and does not validate.
+
+### Curve25519
+
+Random data:
+
+* sk, or Secret Key
+
+Generated data:
+
+* pk, or Public Key
+
+Dump:
+
+ sk:
+ 0x44,0xec,0x0b,0x0e,0xa2,0x0e,0x9c,0x5b,0x8c,0xce,0x7b,0x1d,0x68,0xae,0x0f,0x9e,
+ 0x81,0xe2,0x04,0x76,0xda,0x87,0xa4,0x9e,0xc9,0x4f,0x3b,0xf9,0xc3,0x89,0x63,0x70,
+
+
+ ref10:
+ 0x24,0x55,0x55,0xc0,0xf9,0x80,0xaf,0x02,0x43,0xee,0x8c,0x7f,0xc1,0xad,0x90,0x95,
+ 0x57,0x91,0x14,0x2e,0xf2,0x14,0x22,0x80,0xdd,0x4e,0x3c,0x85,0x71,0x84,0x8c,0x62,
+
+
+ curved25519 diff:
+ 0x12,0xd1,0x61,0x2b,0x16,0xb3,0xd8,0x29,0xf8,0xa3,0xba,0x70,0x4e,0x49,0x4f,0x43,
+ 0xa1,0x3c,0x6b,0x42,0x11,0x61,0xcc,0x30,0x87,0x73,0x46,0xfb,0x85,0xc7,0x9a,0x35,
+
+
+ curved25519-sse2 diff:
+ ____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,
+ ____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,
+
+
+In this case, curved25519 is totally wrong, while curved25519-sse2 matches the reference
+implementation.
\ No newline at end of file
diff --git a/src/ext/ed25519/donna/fuzz/build-nix.php b/src/ext/ed25519/donna/fuzz/build-nix.php
new file mode 100644
index 0000000000..c69144ebc9
--- /dev/null
+++ b/src/ext/ed25519/donna/fuzz/build-nix.php
@@ -0,0 +1,134 @@
+		$this->set = false;
+
+ $map = array();
+ foreach($legal_values as $value)
+ $map[$value] = true;
+
+ for ($i = 1; $i < $argc; $i++) {
+ if (!preg_match("!--".$flag."=(.*)!", $argv[$i], $m))
+ continue;
+ if (isset($map[$m[1]])) {
+ $this->value = $m[1];
+ $this->set = true;
+ return;
+ } else {
+ usage("{$m[1]} is not a valid parameter to --{$flag}!");
+ exit(1);
+ }
+ }
+ }
+ }
+
+ class flag extends argument {
+ function flag($flag) {
+ global $argc, $argv;
+
+ $this->set = false;
+
+ $flag = "--{$flag}";
+ for ($i = 1; $i < $argc; $i++) {
+ if ($argv[$i] !== $flag)
+ continue;
+ $this->value = true;
+ $this->set = true;
+ return;
+ }
+ }
+ }
+
+ $bits = new multiargument("bits", array("32", "64"));
+ $function = new multiargument("function", array("curve25519", "ed25519"));
+ $compiler = new multiargument("compiler", array("gcc", "clang", "icc"));
+ $with_sse2 = new flag("with-sse2");
+ $with_openssl = new flag("with-openssl");
+ $no_asm = new flag("no-asm");
+
+ $err = "";
+ if (!$bits->set)
+ $err .= "--bits not set\n";
+ if (!$function->set)
+ $err .= "--function not set\n";
+
+ if ($err !== "") {
+ usage($err);
+ exit;
+ }
+
+ $compile = ($compiler->set) ? $compiler->value : "gcc";
+ $link = "";
+ $flags = "-O3 -m{$bits->value}";
+ $ret = 0;
+
+ if ($with_openssl->set) $link .= " -lssl -lcrypto";
+ if (!$with_openssl->set) $flags .= " -DED25519_REFHASH -DED25519_TEST";
+ if ($no_asm->set) $flags .= " -DED25519_NO_INLINE_ASM";
+
+ if ($function->value === "curve25519") {
+ runcmd("building ref10..", "{$compile} {$flags} curve25519-ref10.c -c -o curve25519-ref10.o");
+ runcmd("building ed25519..", "{$compile} {$flags} ed25519-donna.c -c -o ed25519.o");
+ if ($with_sse2->set) {
+ runcmd("building ed25519-sse2..", "{$compile} {$flags} ed25519-donna-sse2.c -c -o ed25519-sse2.o -msse2");
+ $flags .= " -DED25519_SSE2";
+ $link .= " ed25519-sse2.o";
+ }
+ runcmd("linking..", "{$compile} {$flags} {$link} fuzz-curve25519.c ed25519.o curve25519-ref10.o -o fuzz-curve25519");
+ echoln("fuzz-curve25519 built.");
+ } else if ($function->value === "ed25519") {
+ runcmd("building ref10..", "{$compile} {$flags} ed25519-ref10.c -c -o ed25519-ref10.o");
+ runcmd("building ed25519..", "{$compile} {$flags} ed25519-donna.c -c -o ed25519.o");
+ if ($with_sse2->set) {
+ runcmd("building ed25519-sse2..", "{$compile} {$flags} ed25519-donna-sse2.c -c -o ed25519-sse2.o -msse2");
+ $flags .= " -DED25519_SSE2";
+ $link .= " ed25519-sse2.o";
+ }
+ runcmd("linking..", "{$compile} {$flags} {$link} fuzz-ed25519.c ed25519.o ed25519-ref10.o -o fuzz-ed25519");
+ echoln("fuzz-ed25519 built.");
+ }
+
+
+ cleanup();
+?>
diff --git a/src/ext/ed25519/donna/fuzz/curve25519-ref10.c b/src/ext/ed25519/donna/fuzz/curve25519-ref10.c
new file mode 100644
index 0000000000..efefce6fde
--- /dev/null
+++ b/src/ext/ed25519/donna/fuzz/curve25519-ref10.c
@@ -0,0 +1,1272 @@
+#include <stdint.h>
+
+typedef int32_t crypto_int32;
+typedef int64_t crypto_int64;
+typedef uint64_t crypto_uint64;
+
+typedef crypto_int32 fe[10];
+
+/*
+h = 0
+*/
+
+void fe_0(fe h)
+{
+ h[0] = 0;
+ h[1] = 0;
+ h[2] = 0;
+ h[3] = 0;
+ h[4] = 0;
+ h[5] = 0;
+ h[6] = 0;
+ h[7] = 0;
+ h[8] = 0;
+ h[9] = 0;
+}
+
+/*
+h = 1
+*/
+
+void fe_1(fe h)
+{
+ h[0] = 1;
+ h[1] = 0;
+ h[2] = 0;
+ h[3] = 0;
+ h[4] = 0;
+ h[5] = 0;
+ h[6] = 0;
+ h[7] = 0;
+ h[8] = 0;
+ h[9] = 0;
+}
+
+/*
+h = f + g
+Can overlap h with f or g.
+
+Preconditions:
+ |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+ |g| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+
+Postconditions:
+ |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+*/
+
+void fe_add(fe h,fe f,fe g)
+{
+ crypto_int32 f0 = f[0];
+ crypto_int32 f1 = f[1];
+ crypto_int32 f2 = f[2];
+ crypto_int32 f3 = f[3];
+ crypto_int32 f4 = f[4];
+ crypto_int32 f5 = f[5];
+ crypto_int32 f6 = f[6];
+ crypto_int32 f7 = f[7];
+ crypto_int32 f8 = f[8];
+ crypto_int32 f9 = f[9];
+ crypto_int32 g0 = g[0];
+ crypto_int32 g1 = g[1];
+ crypto_int32 g2 = g[2];
+ crypto_int32 g3 = g[3];
+ crypto_int32 g4 = g[4];
+ crypto_int32 g5 = g[5];
+ crypto_int32 g6 = g[6];
+ crypto_int32 g7 = g[7];
+ crypto_int32 g8 = g[8];
+ crypto_int32 g9 = g[9];
+ crypto_int32 h0 = f0 + g0;
+ crypto_int32 h1 = f1 + g1;
+ crypto_int32 h2 = f2 + g2;
+ crypto_int32 h3 = f3 + g3;
+ crypto_int32 h4 = f4 + g4;
+ crypto_int32 h5 = f5 + g5;
+ crypto_int32 h6 = f6 + g6;
+ crypto_int32 h7 = f7 + g7;
+ crypto_int32 h8 = f8 + g8;
+ crypto_int32 h9 = f9 + g9;
+ h[0] = h0;
+ h[1] = h1;
+ h[2] = h2;
+ h[3] = h3;
+ h[4] = h4;
+ h[5] = h5;
+ h[6] = h6;
+ h[7] = h7;
+ h[8] = h8;
+ h[9] = h9;
+}
+
+/*
+h = f
+*/
+
+void fe_copy(fe h,fe f)
+{
+ crypto_int32 f0 = f[0];
+ crypto_int32 f1 = f[1];
+ crypto_int32 f2 = f[2];
+ crypto_int32 f3 = f[3];
+ crypto_int32 f4 = f[4];
+ crypto_int32 f5 = f[5];
+ crypto_int32 f6 = f[6];
+ crypto_int32 f7 = f[7];
+ crypto_int32 f8 = f[8];
+ crypto_int32 f9 = f[9];
+ h[0] = f0;
+ h[1] = f1;
+ h[2] = f2;
+ h[3] = f3;
+ h[4] = f4;
+ h[5] = f5;
+ h[6] = f6;
+ h[7] = f7;
+ h[8] = f8;
+ h[9] = f9;
+}
+
+
+/*
+Replace (f,g) with (g,f) if b == 1;
+replace (f,g) with (f,g) if b == 0.
+
+Preconditions: b in {0,1}.
+*/
+
+void fe_cswap(fe f,fe g,unsigned int b)
+{
+ crypto_int32 f0 = f[0];
+ crypto_int32 f1 = f[1];
+ crypto_int32 f2 = f[2];
+ crypto_int32 f3 = f[3];
+ crypto_int32 f4 = f[4];
+ crypto_int32 f5 = f[5];
+ crypto_int32 f6 = f[6];
+ crypto_int32 f7 = f[7];
+ crypto_int32 f8 = f[8];
+ crypto_int32 f9 = f[9];
+ crypto_int32 g0 = g[0];
+ crypto_int32 g1 = g[1];
+ crypto_int32 g2 = g[2];
+ crypto_int32 g3 = g[3];
+ crypto_int32 g4 = g[4];
+ crypto_int32 g5 = g[5];
+ crypto_int32 g6 = g[6];
+ crypto_int32 g7 = g[7];
+ crypto_int32 g8 = g[8];
+ crypto_int32 g9 = g[9];
+ crypto_int32 x0 = f0 ^ g0;
+ crypto_int32 x1 = f1 ^ g1;
+ crypto_int32 x2 = f2 ^ g2;
+ crypto_int32 x3 = f3 ^ g3;
+ crypto_int32 x4 = f4 ^ g4;
+ crypto_int32 x5 = f5 ^ g5;
+ crypto_int32 x6 = f6 ^ g6;
+ crypto_int32 x7 = f7 ^ g7;
+ crypto_int32 x8 = f8 ^ g8;
+ crypto_int32 x9 = f9 ^ g9;
+ b = -b;
+ x0 &= b;
+ x1 &= b;
+ x2 &= b;
+ x3 &= b;
+ x4 &= b;
+ x5 &= b;
+ x6 &= b;
+ x7 &= b;
+ x8 &= b;
+ x9 &= b;
+ f[0] = f0 ^ x0;
+ f[1] = f1 ^ x1;
+ f[2] = f2 ^ x2;
+ f[3] = f3 ^ x3;
+ f[4] = f4 ^ x4;
+ f[5] = f5 ^ x5;
+ f[6] = f6 ^ x6;
+ f[7] = f7 ^ x7;
+ f[8] = f8 ^ x8;
+ f[9] = f9 ^ x9;
+ g[0] = g0 ^ x0;
+ g[1] = g1 ^ x1;
+ g[2] = g2 ^ x2;
+ g[3] = g3 ^ x3;
+ g[4] = g4 ^ x4;
+ g[5] = g5 ^ x5;
+ g[6] = g6 ^ x6;
+ g[7] = g7 ^ x7;
+ g[8] = g8 ^ x8;
+ g[9] = g9 ^ x9;
+}
+
+static crypto_uint64 load_3(const unsigned char *in)
+{
+ crypto_uint64 result;
+ result = (crypto_uint64) in[0];
+ result |= ((crypto_uint64) in[1]) << 8;
+ result |= ((crypto_uint64) in[2]) << 16;
+ return result;
+}
+
+static crypto_uint64 load_4(const unsigned char *in)
+{
+ crypto_uint64 result;
+ result = (crypto_uint64) in[0];
+ result |= ((crypto_uint64) in[1]) << 8;
+ result |= ((crypto_uint64) in[2]) << 16;
+ result |= ((crypto_uint64) in[3]) << 24;
+ return result;
+}
+
+void fe_frombytes(fe h,const unsigned char *s)
+{
+ crypto_int64 h0 = load_4(s);
+ crypto_int64 h1 = load_3(s + 4) << 6;
+ crypto_int64 h2 = load_3(s + 7) << 5;
+ crypto_int64 h3 = load_3(s + 10) << 3;
+ crypto_int64 h4 = load_3(s + 13) << 2;
+ crypto_int64 h5 = load_4(s + 16);
+ crypto_int64 h6 = load_3(s + 20) << 7;
+ crypto_int64 h7 = load_3(s + 23) << 5;
+ crypto_int64 h8 = load_3(s + 26) << 4;
+ crypto_int64 h9 = load_3(s + 29) << 2;
+ crypto_int64 carry0;
+ crypto_int64 carry1;
+ crypto_int64 carry2;
+ crypto_int64 carry3;
+ crypto_int64 carry4;
+ crypto_int64 carry5;
+ crypto_int64 carry6;
+ crypto_int64 carry7;
+ crypto_int64 carry8;
+ crypto_int64 carry9;
+
+ carry9 = (h9 + (crypto_int64) (1<<24)) >> 25; h0 += carry9 * 19; h9 -= carry9 << 25;
+ carry1 = (h1 + (crypto_int64) (1<<24)) >> 25; h2 += carry1; h1 -= carry1 << 25;
+ carry3 = (h3 + (crypto_int64) (1<<24)) >> 25; h4 += carry3; h3 -= carry3 << 25;
+ carry5 = (h5 + (crypto_int64) (1<<24)) >> 25; h6 += carry5; h5 -= carry5 << 25;
+ carry7 = (h7 + (crypto_int64) (1<<24)) >> 25; h8 += carry7; h7 -= carry7 << 25;
+
+ carry0 = (h0 + (crypto_int64) (1<<25)) >> 26; h1 += carry0; h0 -= carry0 << 26;
+ carry2 = (h2 + (crypto_int64) (1<<25)) >> 26; h3 += carry2; h2 -= carry2 << 26;
+ carry4 = (h4 + (crypto_int64) (1<<25)) >> 26; h5 += carry4; h4 -= carry4 << 26;
+ carry6 = (h6 + (crypto_int64) (1<<25)) >> 26; h7 += carry6; h6 -= carry6 << 26;
+ carry8 = (h8 + (crypto_int64) (1<<25)) >> 26; h9 += carry8; h8 -= carry8 << 26;
+
+ h[0] = h0;
+ h[1] = h1;
+ h[2] = h2;
+ h[3] = h3;
+ h[4] = h4;
+ h[5] = h5;
+ h[6] = h6;
+ h[7] = h7;
+ h[8] = h8;
+ h[9] = h9;
+}
+
+
+/*
+h = f * g
+Can overlap h with f or g.
+
+Preconditions:
+ |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+ |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+
+Postconditions:
+ |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+*/
+
+/*
+Notes on implementation strategy:
+
+Using schoolbook multiplication.
+Karatsuba would save a little in some cost models.
+
+Most multiplications by 2 and 19 are 32-bit precomputations;
+cheaper than 64-bit postcomputations.
+
+There is one remaining multiplication by 19 in the carry chain;
+one *19 precomputation can be merged into this,
+but the resulting data flow is considerably less clean.
+
+There are 12 carries below.
+10 of them are 2-way parallelizable and vectorizable.
+Can get away with 11 carries, but then data flow is much deeper.
+
+With tighter constraints on inputs can squeeze carries into int32.
+*/
+
+void fe_mul(fe h,fe f,fe g)
+{
+ crypto_int32 f0 = f[0];
+ crypto_int32 f1 = f[1];
+ crypto_int32 f2 = f[2];
+ crypto_int32 f3 = f[3];
+ crypto_int32 f4 = f[4];
+ crypto_int32 f5 = f[5];
+ crypto_int32 f6 = f[6];
+ crypto_int32 f7 = f[7];
+ crypto_int32 f8 = f[8];
+ crypto_int32 f9 = f[9];
+ crypto_int32 g0 = g[0];
+ crypto_int32 g1 = g[1];
+ crypto_int32 g2 = g[2];
+ crypto_int32 g3 = g[3];
+ crypto_int32 g4 = g[4];
+ crypto_int32 g5 = g[5];
+ crypto_int32 g6 = g[6];
+ crypto_int32 g7 = g[7];
+ crypto_int32 g8 = g[8];
+ crypto_int32 g9 = g[9];
+ crypto_int32 g1_19 = 19 * g1; /* 1.4*2^29 */
+ crypto_int32 g2_19 = 19 * g2; /* 1.4*2^30; still ok */
+ crypto_int32 g3_19 = 19 * g3;
+ crypto_int32 g4_19 = 19 * g4;
+ crypto_int32 g5_19 = 19 * g5;
+ crypto_int32 g6_19 = 19 * g6;
+ crypto_int32 g7_19 = 19 * g7;
+ crypto_int32 g8_19 = 19 * g8;
+ crypto_int32 g9_19 = 19 * g9;
+ crypto_int32 f1_2 = 2 * f1;
+ crypto_int32 f3_2 = 2 * f3;
+ crypto_int32 f5_2 = 2 * f5;
+ crypto_int32 f7_2 = 2 * f7;
+ crypto_int32 f9_2 = 2 * f9;
+ crypto_int64 f0g0 = f0 * (crypto_int64) g0;
+ crypto_int64 f0g1 = f0 * (crypto_int64) g1;
+ crypto_int64 f0g2 = f0 * (crypto_int64) g2;
+ crypto_int64 f0g3 = f0 * (crypto_int64) g3;
+ crypto_int64 f0g4 = f0 * (crypto_int64) g4;
+ crypto_int64 f0g5 = f0 * (crypto_int64) g5;
+ crypto_int64 f0g6 = f0 * (crypto_int64) g6;
+ crypto_int64 f0g7 = f0 * (crypto_int64) g7;
+ crypto_int64 f0g8 = f0 * (crypto_int64) g8;
+ crypto_int64 f0g9 = f0 * (crypto_int64) g9;
+ crypto_int64 f1g0 = f1 * (crypto_int64) g0;
+ crypto_int64 f1g1_2 = f1_2 * (crypto_int64) g1;
+ crypto_int64 f1g2 = f1 * (crypto_int64) g2;
+ crypto_int64 f1g3_2 = f1_2 * (crypto_int64) g3;
+ crypto_int64 f1g4 = f1 * (crypto_int64) g4;
+ crypto_int64 f1g5_2 = f1_2 * (crypto_int64) g5;
+ crypto_int64 f1g6 = f1 * (crypto_int64) g6;
+ crypto_int64 f1g7_2 = f1_2 * (crypto_int64) g7;
+ crypto_int64 f1g8 = f1 * (crypto_int64) g8;
+ crypto_int64 f1g9_38 = f1_2 * (crypto_int64) g9_19;
+ crypto_int64 f2g0 = f2 * (crypto_int64) g0;
+ crypto_int64 f2g1 = f2 * (crypto_int64) g1;
+ crypto_int64 f2g2 = f2 * (crypto_int64) g2;
+ crypto_int64 f2g3 = f2 * (crypto_int64) g3;
+ crypto_int64 f2g4 = f2 * (crypto_int64) g4;
+ crypto_int64 f2g5 = f2 * (crypto_int64) g5;
+ crypto_int64 f2g6 = f2 * (crypto_int64) g6;
+ crypto_int64 f2g7 = f2 * (crypto_int64) g7;
+ crypto_int64 f2g8_19 = f2 * (crypto_int64) g8_19;
+ crypto_int64 f2g9_19 = f2 * (crypto_int64) g9_19;
+ crypto_int64 f3g0 = f3 * (crypto_int64) g0;
+ crypto_int64 f3g1_2 = f3_2 * (crypto_int64) g1;
+ crypto_int64 f3g2 = f3 * (crypto_int64) g2;
+ crypto_int64 f3g3_2 = f3_2 * (crypto_int64) g3;
+ crypto_int64 f3g4 = f3 * (crypto_int64) g4;
+ crypto_int64 f3g5_2 = f3_2 * (crypto_int64) g5;
+ crypto_int64 f3g6 = f3 * (crypto_int64) g6;
+ crypto_int64 f3g7_38 = f3_2 * (crypto_int64) g7_19;
+ crypto_int64 f3g8_19 = f3 * (crypto_int64) g8_19;
+ crypto_int64 f3g9_38 = f3_2 * (crypto_int64) g9_19;
+ crypto_int64 f4g0 = f4 * (crypto_int64) g0;
+ crypto_int64 f4g1 = f4 * (crypto_int64) g1;
+ crypto_int64 f4g2 = f4 * (crypto_int64) g2;
+ crypto_int64 f4g3 = f4 * (crypto_int64) g3;
+ crypto_int64 f4g4 = f4 * (crypto_int64) g4;
+ crypto_int64 f4g5 = f4 * (crypto_int64) g5;
+ crypto_int64 f4g6_19 = f4 * (crypto_int64) g6_19;
+ crypto_int64 f4g7_19 = f4 * (crypto_int64) g7_19;
+ crypto_int64 f4g8_19 = f4 * (crypto_int64) g8_19;
+ crypto_int64 f4g9_19 = f4 * (crypto_int64) g9_19;
+ crypto_int64 f5g0 = f5 * (crypto_int64) g0;
+ crypto_int64 f5g1_2 = f5_2 * (crypto_int64) g1;
+ crypto_int64 f5g2 = f5 * (crypto_int64) g2;
+ crypto_int64 f5g3_2 = f5_2 * (crypto_int64) g3;
+ crypto_int64 f5g4 = f5 * (crypto_int64) g4;
+ crypto_int64 f5g5_38 = f5_2 * (crypto_int64) g5_19;
+ crypto_int64 f5g6_19 = f5 * (crypto_int64) g6_19;
+ crypto_int64 f5g7_38 = f5_2 * (crypto_int64) g7_19;
+ crypto_int64 f5g8_19 = f5 * (crypto_int64) g8_19;
+ crypto_int64 f5g9_38 = f5_2 * (crypto_int64) g9_19;
+ crypto_int64 f6g0 = f6 * (crypto_int64) g0;
+ crypto_int64 f6g1 = f6 * (crypto_int64) g1;
+ crypto_int64 f6g2 = f6 * (crypto_int64) g2;
+ crypto_int64 f6g3 = f6 * (crypto_int64) g3;
+ crypto_int64 f6g4_19 = f6 * (crypto_int64) g4_19;
+ crypto_int64 f6g5_19 = f6 * (crypto_int64) g5_19;
+ crypto_int64 f6g6_19 = f6 * (crypto_int64) g6_19;
+ crypto_int64 f6g7_19 = f6 * (crypto_int64) g7_19;
+ crypto_int64 f6g8_19 = f6 * (crypto_int64) g8_19;
+ crypto_int64 f6g9_19 = f6 * (crypto_int64) g9_19;
+ crypto_int64 f7g0 = f7 * (crypto_int64) g0;
+ crypto_int64 f7g1_2 = f7_2 * (crypto_int64) g1;
+ crypto_int64 f7g2 = f7 * (crypto_int64) g2;
+ crypto_int64 f7g3_38 = f7_2 * (crypto_int64) g3_19;
+ crypto_int64 f7g4_19 = f7 * (crypto_int64) g4_19;
+ crypto_int64 f7g5_38 = f7_2 * (crypto_int64) g5_19;
+ crypto_int64 f7g6_19 = f7 * (crypto_int64) g6_19;
+ crypto_int64 f7g7_38 = f7_2 * (crypto_int64) g7_19;
+ crypto_int64 f7g8_19 = f7 * (crypto_int64) g8_19;
+ crypto_int64 f7g9_38 = f7_2 * (crypto_int64) g9_19;
+ crypto_int64 f8g0 = f8 * (crypto_int64) g0;
+ crypto_int64 f8g1 = f8 * (crypto_int64) g1;
+ crypto_int64 f8g2_19 = f8 * (crypto_int64) g2_19;
+ crypto_int64 f8g3_19 = f8 * (crypto_int64) g3_19;
+ crypto_int64 f8g4_19 = f8 * (crypto_int64) g4_19;
+ crypto_int64 f8g5_19 = f8 * (crypto_int64) g5_19;
+ crypto_int64 f8g6_19 = f8 * (crypto_int64) g6_19;
+ crypto_int64 f8g7_19 = f8 * (crypto_int64) g7_19;
+ crypto_int64 f8g8_19 = f8 * (crypto_int64) g8_19;
+ crypto_int64 f8g9_19 = f8 * (crypto_int64) g9_19;
+ crypto_int64 f9g0 = f9 * (crypto_int64) g0;
+ crypto_int64 f9g1_38 = f9_2 * (crypto_int64) g1_19;
+ crypto_int64 f9g2_19 = f9 * (crypto_int64) g2_19;
+ crypto_int64 f9g3_38 = f9_2 * (crypto_int64) g3_19;
+ crypto_int64 f9g4_19 = f9 * (crypto_int64) g4_19;
+ crypto_int64 f9g5_38 = f9_2 * (crypto_int64) g5_19;
+ crypto_int64 f9g6_19 = f9 * (crypto_int64) g6_19;
+ crypto_int64 f9g7_38 = f9_2 * (crypto_int64) g7_19;
+ crypto_int64 f9g8_19 = f9 * (crypto_int64) g8_19;
+ crypto_int64 f9g9_38 = f9_2 * (crypto_int64) g9_19;
+ crypto_int64 h0 = f0g0+f1g9_38+f2g8_19+f3g7_38+f4g6_19+f5g5_38+f6g4_19+f7g3_38+f8g2_19+f9g1_38;
+ crypto_int64 h1 = f0g1+f1g0 +f2g9_19+f3g8_19+f4g7_19+f5g6_19+f6g5_19+f7g4_19+f8g3_19+f9g2_19;
+ crypto_int64 h2 = f0g2+f1g1_2 +f2g0 +f3g9_38+f4g8_19+f5g7_38+f6g6_19+f7g5_38+f8g4_19+f9g3_38;
+ crypto_int64 h3 = f0g3+f1g2 +f2g1 +f3g0 +f4g9_19+f5g8_19+f6g7_19+f7g6_19+f8g5_19+f9g4_19;
+ crypto_int64 h4 = f0g4+f1g3_2 +f2g2 +f3g1_2 +f4g0 +f5g9_38+f6g8_19+f7g7_38+f8g6_19+f9g5_38;
+ crypto_int64 h5 = f0g5+f1g4 +f2g3 +f3g2 +f4g1 +f5g0 +f6g9_19+f7g8_19+f8g7_19+f9g6_19;
+ crypto_int64 h6 = f0g6+f1g5_2 +f2g4 +f3g3_2 +f4g2 +f5g1_2 +f6g0 +f7g9_38+f8g8_19+f9g7_38;
+ crypto_int64 h7 = f0g7+f1g6 +f2g5 +f3g4 +f4g3 +f5g2 +f6g1 +f7g0 +f8g9_19+f9g8_19;
+ crypto_int64 h8 = f0g8+f1g7_2 +f2g6 +f3g5_2 +f4g4 +f5g3_2 +f6g2 +f7g1_2 +f8g0 +f9g9_38;
+ crypto_int64 h9 = f0g9+f1g8 +f2g7 +f3g6 +f4g5 +f5g4 +f6g3 +f7g2 +f8g1 +f9g0 ;
+ crypto_int64 carry0;
+ crypto_int64 carry1;
+ crypto_int64 carry2;
+ crypto_int64 carry3;
+ crypto_int64 carry4;
+ crypto_int64 carry5;
+ crypto_int64 carry6;
+ crypto_int64 carry7;
+ crypto_int64 carry8;
+ crypto_int64 carry9;
+
+ /*
+ |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38))
+ i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8
+ |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19))
+ i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9
+ */
+
+ carry0 = (h0 + (crypto_int64) (1<<25)) >> 26; h1 += carry0; h0 -= carry0 << 26;
+ carry4 = (h4 + (crypto_int64) (1<<25)) >> 26; h5 += carry4; h4 -= carry4 << 26;
+ /* |h0| <= 2^25 */
+ /* |h4| <= 2^25 */
+ /* |h1| <= 1.51*2^58 */
+ /* |h5| <= 1.51*2^58 */
+
+ carry1 = (h1 + (crypto_int64) (1<<24)) >> 25; h2 += carry1; h1 -= carry1 << 25;
+ carry5 = (h5 + (crypto_int64) (1<<24)) >> 25; h6 += carry5; h5 -= carry5 << 25;
+ /* |h1| <= 2^24; from now on fits into int32 */
+ /* |h5| <= 2^24; from now on fits into int32 */
+ /* |h2| <= 1.21*2^59 */
+ /* |h6| <= 1.21*2^59 */
+
+ carry2 = (h2 + (crypto_int64) (1<<25)) >> 26; h3 += carry2; h2 -= carry2 << 26;
+ carry6 = (h6 + (crypto_int64) (1<<25)) >> 26; h7 += carry6; h6 -= carry6 << 26;
+ /* |h2| <= 2^25; from now on fits into int32 unchanged */
+ /* |h6| <= 2^25; from now on fits into int32 unchanged */
+ /* |h3| <= 1.51*2^58 */
+ /* |h7| <= 1.51*2^58 */
+
+ carry3 = (h3 + (crypto_int64) (1<<24)) >> 25; h4 += carry3; h3 -= carry3 << 25;
+ carry7 = (h7 + (crypto_int64) (1<<24)) >> 25; h8 += carry7; h7 -= carry7 << 25;
+ /* |h3| <= 2^24; from now on fits into int32 unchanged */
+ /* |h7| <= 2^24; from now on fits into int32 unchanged */
+ /* |h4| <= 1.52*2^33 */
+ /* |h8| <= 1.52*2^33 */
+
+ carry4 = (h4 + (crypto_int64) (1<<25)) >> 26; h5 += carry4; h4 -= carry4 << 26;
+ carry8 = (h8 + (crypto_int64) (1<<25)) >> 26; h9 += carry8; h8 -= carry8 << 26;
+ /* |h4| <= 2^25; from now on fits into int32 unchanged */
+ /* |h8| <= 2^25; from now on fits into int32 unchanged */
+ /* |h5| <= 1.01*2^24 */
+ /* |h9| <= 1.51*2^58 */
+
+ carry9 = (h9 + (crypto_int64) (1<<24)) >> 25; h0 += carry9 * 19; h9 -= carry9 << 25;
+ /* |h9| <= 2^24; from now on fits into int32 unchanged */
+ /* |h0| <= 1.8*2^37 */
+
+ carry0 = (h0 + (crypto_int64) (1<<25)) >> 26; h1 += carry0; h0 -= carry0 << 26;
+ /* |h0| <= 2^25; from now on fits into int32 unchanged */
+ /* |h1| <= 1.01*2^24 */
+
+ h[0] = h0;
+ h[1] = h1;
+ h[2] = h2;
+ h[3] = h3;
+ h[4] = h4;
+ h[5] = h5;
+ h[6] = h6;
+ h[7] = h7;
+ h[8] = h8;
+ h[9] = h9;
+}
+
+/*
+h = f * 121666
+Can overlap h with f.
+
+Preconditions:
+ |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+
+Postconditions:
+ |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+*/
+
+void fe_mul121666(fe h,fe f)
+{
+ crypto_int32 f0 = f[0];
+ crypto_int32 f1 = f[1];
+ crypto_int32 f2 = f[2];
+ crypto_int32 f3 = f[3];
+ crypto_int32 f4 = f[4];
+ crypto_int32 f5 = f[5];
+ crypto_int32 f6 = f[6];
+ crypto_int32 f7 = f[7];
+ crypto_int32 f8 = f[8];
+ crypto_int32 f9 = f[9];
+ crypto_int64 h0 = f0 * (crypto_int64) 121666;
+ crypto_int64 h1 = f1 * (crypto_int64) 121666;
+ crypto_int64 h2 = f2 * (crypto_int64) 121666;
+ crypto_int64 h3 = f3 * (crypto_int64) 121666;
+ crypto_int64 h4 = f4 * (crypto_int64) 121666;
+ crypto_int64 h5 = f5 * (crypto_int64) 121666;
+ crypto_int64 h6 = f6 * (crypto_int64) 121666;
+ crypto_int64 h7 = f7 * (crypto_int64) 121666;
+ crypto_int64 h8 = f8 * (crypto_int64) 121666;
+ crypto_int64 h9 = f9 * (crypto_int64) 121666;
+ crypto_int64 carry0;
+ crypto_int64 carry1;
+ crypto_int64 carry2;
+ crypto_int64 carry3;
+ crypto_int64 carry4;
+ crypto_int64 carry5;
+ crypto_int64 carry6;
+ crypto_int64 carry7;
+ crypto_int64 carry8;
+ crypto_int64 carry9;
+
+ carry9 = (h9 + (crypto_int64) (1<<24)) >> 25; h0 += carry9 * 19; h9 -= carry9 << 25;
+ carry1 = (h1 + (crypto_int64) (1<<24)) >> 25; h2 += carry1; h1 -= carry1 << 25;
+ carry3 = (h3 + (crypto_int64) (1<<24)) >> 25; h4 += carry3; h3 -= carry3 << 25;
+ carry5 = (h5 + (crypto_int64) (1<<24)) >> 25; h6 += carry5; h5 -= carry5 << 25;
+ carry7 = (h7 + (crypto_int64) (1<<24)) >> 25; h8 += carry7; h7 -= carry7 << 25;
+
+ carry0 = (h0 + (crypto_int64) (1<<25)) >> 26; h1 += carry0; h0 -= carry0 << 26;
+ carry2 = (h2 + (crypto_int64) (1<<25)) >> 26; h3 += carry2; h2 -= carry2 << 26;
+ carry4 = (h4 + (crypto_int64) (1<<25)) >> 26; h5 += carry4; h4 -= carry4 << 26;
+ carry6 = (h6 + (crypto_int64) (1<<25)) >> 26; h7 += carry6; h6 -= carry6 << 26;
+ carry8 = (h8 + (crypto_int64) (1<<25)) >> 26; h9 += carry8; h8 -= carry8 << 26;
+
+ h[0] = h0;
+ h[1] = h1;
+ h[2] = h2;
+ h[3] = h3;
+ h[4] = h4;
+ h[5] = h5;
+ h[6] = h6;
+ h[7] = h7;
+ h[8] = h8;
+ h[9] = h9;
+}
+
+/*
+h = f * f
+Can overlap h with f.
+
+Preconditions:
+ |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+
+Postconditions:
+ |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+*/
+
+/*
+See fe_mul.c for discussion of implementation strategy.
+*/
+
+void fe_sq(fe h,fe f)
+{
+ crypto_int32 f0 = f[0];
+ crypto_int32 f1 = f[1];
+ crypto_int32 f2 = f[2];
+ crypto_int32 f3 = f[3];
+ crypto_int32 f4 = f[4];
+ crypto_int32 f5 = f[5];
+ crypto_int32 f6 = f[6];
+ crypto_int32 f7 = f[7];
+ crypto_int32 f8 = f[8];
+ crypto_int32 f9 = f[9];
+ crypto_int32 f0_2 = 2 * f0;
+ crypto_int32 f1_2 = 2 * f1;
+ crypto_int32 f2_2 = 2 * f2;
+ crypto_int32 f3_2 = 2 * f3;
+ crypto_int32 f4_2 = 2 * f4;
+ crypto_int32 f5_2 = 2 * f5;
+ crypto_int32 f6_2 = 2 * f6;
+ crypto_int32 f7_2 = 2 * f7;
+ crypto_int32 f5_38 = 38 * f5; /* 1.31*2^30 */
+ crypto_int32 f6_19 = 19 * f6; /* 1.31*2^30 */
+ crypto_int32 f7_38 = 38 * f7; /* 1.31*2^30 */
+ crypto_int32 f8_19 = 19 * f8; /* 1.31*2^30 */
+ crypto_int32 f9_38 = 38 * f9; /* 1.31*2^30 */
+ crypto_int64 f0f0 = f0 * (crypto_int64) f0;
+ crypto_int64 f0f1_2 = f0_2 * (crypto_int64) f1;
+ crypto_int64 f0f2_2 = f0_2 * (crypto_int64) f2;
+ crypto_int64 f0f3_2 = f0_2 * (crypto_int64) f3;
+ crypto_int64 f0f4_2 = f0_2 * (crypto_int64) f4;
+ crypto_int64 f0f5_2 = f0_2 * (crypto_int64) f5;
+ crypto_int64 f0f6_2 = f0_2 * (crypto_int64) f6;
+ crypto_int64 f0f7_2 = f0_2 * (crypto_int64) f7;
+ crypto_int64 f0f8_2 = f0_2 * (crypto_int64) f8;
+ crypto_int64 f0f9_2 = f0_2 * (crypto_int64) f9;
+ crypto_int64 f1f1_2 = f1_2 * (crypto_int64) f1;
+ crypto_int64 f1f2_2 = f1_2 * (crypto_int64) f2;
+ crypto_int64 f1f3_4 = f1_2 * (crypto_int64) f3_2;
+ crypto_int64 f1f4_2 = f1_2 * (crypto_int64) f4;
+ crypto_int64 f1f5_4 = f1_2 * (crypto_int64) f5_2;
+ crypto_int64 f1f6_2 = f1_2 * (crypto_int64) f6;
+ crypto_int64 f1f7_4 = f1_2 * (crypto_int64) f7_2;
+ crypto_int64 f1f8_2 = f1_2 * (crypto_int64) f8;
+ crypto_int64 f1f9_76 = f1_2 * (crypto_int64) f9_38;
+ crypto_int64 f2f2 = f2 * (crypto_int64) f2;
+ crypto_int64 f2f3_2 = f2_2 * (crypto_int64) f3;
+ crypto_int64 f2f4_2 = f2_2 * (crypto_int64) f4;
+ crypto_int64 f2f5_2 = f2_2 * (crypto_int64) f5;
+ crypto_int64 f2f6_2 = f2_2 * (crypto_int64) f6;
+ crypto_int64 f2f7_2 = f2_2 * (crypto_int64) f7;
+ crypto_int64 f2f8_38 = f2_2 * (crypto_int64) f8_19;
+ crypto_int64 f2f9_38 = f2 * (crypto_int64) f9_38;
+ crypto_int64 f3f3_2 = f3_2 * (crypto_int64) f3;
+ crypto_int64 f3f4_2 = f3_2 * (crypto_int64) f4;
+ crypto_int64 f3f5_4 = f3_2 * (crypto_int64) f5_2;
+ crypto_int64 f3f6_2 = f3_2 * (crypto_int64) f6;
+ crypto_int64 f3f7_76 = f3_2 * (crypto_int64) f7_38;
+ crypto_int64 f3f8_38 = f3_2 * (crypto_int64) f8_19;
+ crypto_int64 f3f9_76 = f3_2 * (crypto_int64) f9_38;
+ crypto_int64 f4f4 = f4 * (crypto_int64) f4;
+ crypto_int64 f4f5_2 = f4_2 * (crypto_int64) f5;
+ crypto_int64 f4f6_38 = f4_2 * (crypto_int64) f6_19;
+ crypto_int64 f4f7_38 = f4 * (crypto_int64) f7_38;
+ crypto_int64 f4f8_38 = f4_2 * (crypto_int64) f8_19;
+ crypto_int64 f4f9_38 = f4 * (crypto_int64) f9_38;
+ crypto_int64 f5f5_38 = f5 * (crypto_int64) f5_38;
+ crypto_int64 f5f6_38 = f5_2 * (crypto_int64) f6_19;
+ crypto_int64 f5f7_76 = f5_2 * (crypto_int64) f7_38;
+ crypto_int64 f5f8_38 = f5_2 * (crypto_int64) f8_19;
+ crypto_int64 f5f9_76 = f5_2 * (crypto_int64) f9_38;
+ crypto_int64 f6f6_19 = f6 * (crypto_int64) f6_19;
+ crypto_int64 f6f7_38 = f6 * (crypto_int64) f7_38;
+ crypto_int64 f6f8_38 = f6_2 * (crypto_int64) f8_19;
+ crypto_int64 f6f9_38 = f6 * (crypto_int64) f9_38;
+ crypto_int64 f7f7_38 = f7 * (crypto_int64) f7_38;
+ crypto_int64 f7f8_38 = f7_2 * (crypto_int64) f8_19;
+ crypto_int64 f7f9_76 = f7_2 * (crypto_int64) f9_38;
+ crypto_int64 f8f8_19 = f8 * (crypto_int64) f8_19;
+ crypto_int64 f8f9_38 = f8 * (crypto_int64) f9_38;
+ crypto_int64 f9f9_38 = f9 * (crypto_int64) f9_38;
+ crypto_int64 h0 = f0f0 +f1f9_76+f2f8_38+f3f7_76+f4f6_38+f5f5_38;
+ crypto_int64 h1 = f0f1_2+f2f9_38+f3f8_38+f4f7_38+f5f6_38;
+ crypto_int64 h2 = f0f2_2+f1f1_2 +f3f9_76+f4f8_38+f5f7_76+f6f6_19;
+ crypto_int64 h3 = f0f3_2+f1f2_2 +f4f9_38+f5f8_38+f6f7_38;
+ crypto_int64 h4 = f0f4_2+f1f3_4 +f2f2 +f5f9_76+f6f8_38+f7f7_38;
+ crypto_int64 h5 = f0f5_2+f1f4_2 +f2f3_2 +f6f9_38+f7f8_38;
+ crypto_int64 h6 = f0f6_2+f1f5_4 +f2f4_2 +f3f3_2 +f7f9_76+f8f8_19;
+ crypto_int64 h7 = f0f7_2+f1f6_2 +f2f5_2 +f3f4_2 +f8f9_38;
+ crypto_int64 h8 = f0f8_2+f1f7_4 +f2f6_2 +f3f5_4 +f4f4 +f9f9_38;
+ crypto_int64 h9 = f0f9_2+f1f8_2 +f2f7_2 +f3f6_2 +f4f5_2;
+ crypto_int64 carry0;
+ crypto_int64 carry1;
+ crypto_int64 carry2;
+ crypto_int64 carry3;
+ crypto_int64 carry4;
+ crypto_int64 carry5;
+ crypto_int64 carry6;
+ crypto_int64 carry7;
+ crypto_int64 carry8;
+ crypto_int64 carry9;
+
+ carry0 = (h0 + (crypto_int64) (1<<25)) >> 26; h1 += carry0; h0 -= carry0 << 26;
+ carry4 = (h4 + (crypto_int64) (1<<25)) >> 26; h5 += carry4; h4 -= carry4 << 26;
+
+ carry1 = (h1 + (crypto_int64) (1<<24)) >> 25; h2 += carry1; h1 -= carry1 << 25;
+ carry5 = (h5 + (crypto_int64) (1<<24)) >> 25; h6 += carry5; h5 -= carry5 << 25;
+
+ carry2 = (h2 + (crypto_int64) (1<<25)) >> 26; h3 += carry2; h2 -= carry2 << 26;
+ carry6 = (h6 + (crypto_int64) (1<<25)) >> 26; h7 += carry6; h6 -= carry6 << 26;
+
+ carry3 = (h3 + (crypto_int64) (1<<24)) >> 25; h4 += carry3; h3 -= carry3 << 25;
+ carry7 = (h7 + (crypto_int64) (1<<24)) >> 25; h8 += carry7; h7 -= carry7 << 25;
+
+ carry4 = (h4 + (crypto_int64) (1<<25)) >> 26; h5 += carry4; h4 -= carry4 << 26;
+ carry8 = (h8 + (crypto_int64) (1<<25)) >> 26; h9 += carry8; h8 -= carry8 << 26;
+
+ carry9 = (h9 + (crypto_int64) (1<<24)) >> 25; h0 += carry9 * 19; h9 -= carry9 << 25;
+
+ carry0 = (h0 + (crypto_int64) (1<<25)) >> 26; h1 += carry0; h0 -= carry0 << 26;
+
+ h[0] = h0;
+ h[1] = h1;
+ h[2] = h2;
+ h[3] = h3;
+ h[4] = h4;
+ h[5] = h5;
+ h[6] = h6;
+ h[7] = h7;
+ h[8] = h8;
+ h[9] = h9;
+}
+
+/*
+h = f - g
+Can overlap h with f or g.
+
+Preconditions:
+ |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+ |g| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+
+Postconditions:
+ |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+*/
+
+void fe_sub(fe h,fe f,fe g)
+{
+ crypto_int32 f0 = f[0];
+ crypto_int32 f1 = f[1];
+ crypto_int32 f2 = f[2];
+ crypto_int32 f3 = f[3];
+ crypto_int32 f4 = f[4];
+ crypto_int32 f5 = f[5];
+ crypto_int32 f6 = f[6];
+ crypto_int32 f7 = f[7];
+ crypto_int32 f8 = f[8];
+ crypto_int32 f9 = f[9];
+ crypto_int32 g0 = g[0];
+ crypto_int32 g1 = g[1];
+ crypto_int32 g2 = g[2];
+ crypto_int32 g3 = g[3];
+ crypto_int32 g4 = g[4];
+ crypto_int32 g5 = g[5];
+ crypto_int32 g6 = g[6];
+ crypto_int32 g7 = g[7];
+ crypto_int32 g8 = g[8];
+ crypto_int32 g9 = g[9];
+ crypto_int32 h0 = f0 - g0;
+ crypto_int32 h1 = f1 - g1;
+ crypto_int32 h2 = f2 - g2;
+ crypto_int32 h3 = f3 - g3;
+ crypto_int32 h4 = f4 - g4;
+ crypto_int32 h5 = f5 - g5;
+ crypto_int32 h6 = f6 - g6;
+ crypto_int32 h7 = f7 - g7;
+ crypto_int32 h8 = f8 - g8;
+ crypto_int32 h9 = f9 - g9;
+ h[0] = h0;
+ h[1] = h1;
+ h[2] = h2;
+ h[3] = h3;
+ h[4] = h4;
+ h[5] = h5;
+ h[6] = h6;
+ h[7] = h7;
+ h[8] = h8;
+ h[9] = h9;
+}
+
+/*
+Preconditions:
+ |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+
+Write p=2^255-19; q=floor(h/p).
+Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
+
+Proof:
+ Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
+ Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
+
+ Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
+  Then 0<y<1.
+
+  Write r=h-pq.
+  Have 0<=r<=p-1=2^255-20.
+  Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
+
+  Write x=r+19(2^-255)r+y.
+  Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
+
+  Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
+  so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
+*/
+
+void fe_tobytes(unsigned char *s,fe h)
+{
+  crypto_int32 h0 = h[0];
+  crypto_int32 h1 = h[1];
+  crypto_int32 h2 = h[2];
+  crypto_int32 h3 = h[3];
+  crypto_int32 h4 = h[4];
+  crypto_int32 h5 = h[5];
+  crypto_int32 h6 = h[6];
+  crypto_int32 h7 = h[7];
+  crypto_int32 h8 = h[8];
+  crypto_int32 h9 = h[9];
+  crypto_int32 q;
+  crypto_int32 carry0;
+  crypto_int32 carry1;
+  crypto_int32 carry2;
+  crypto_int32 carry3;
+  crypto_int32 carry4;
+  crypto_int32 carry5;
+  crypto_int32 carry6;
+  crypto_int32 carry7;
+  crypto_int32 carry8;
+  crypto_int32 carry9;
+
+  q = (19 * h9 + (((crypto_int32) 1) << 24)) >> 25;
+ q = (h0 + q) >> 26;
+ q = (h1 + q) >> 25;
+ q = (h2 + q) >> 26;
+ q = (h3 + q) >> 25;
+ q = (h4 + q) >> 26;
+ q = (h5 + q) >> 25;
+ q = (h6 + q) >> 26;
+ q = (h7 + q) >> 25;
+ q = (h8 + q) >> 26;
+ q = (h9 + q) >> 25;
+
+ /* Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. */
+ h0 += 19 * q;
+ /* Goal: Output h-2^255 q, which is between 0 and 2^255-20. */
+
+ carry0 = h0 >> 26; h1 += carry0; h0 -= carry0 << 26;
+ carry1 = h1 >> 25; h2 += carry1; h1 -= carry1 << 25;
+ carry2 = h2 >> 26; h3 += carry2; h2 -= carry2 << 26;
+ carry3 = h3 >> 25; h4 += carry3; h3 -= carry3 << 25;
+ carry4 = h4 >> 26; h5 += carry4; h4 -= carry4 << 26;
+ carry5 = h5 >> 25; h6 += carry5; h5 -= carry5 << 25;
+ carry6 = h6 >> 26; h7 += carry6; h6 -= carry6 << 26;
+ carry7 = h7 >> 25; h8 += carry7; h7 -= carry7 << 25;
+ carry8 = h8 >> 26; h9 += carry8; h8 -= carry8 << 26;
+ carry9 = h9 >> 25; h9 -= carry9 << 25;
+ /* h10 = carry9 */
+
+ /*
+ Goal: Output h0+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
+ Have h0+...+2^230 h9 between 0 and 2^255-1;
+ evidently 2^255 h10-2^255 q = 0.
+ Goal: Output h0+...+2^230 h9.
+ */
+
+ s[0] = h0 >> 0;
+ s[1] = h0 >> 8;
+ s[2] = h0 >> 16;
+ s[3] = (h0 >> 24) | (h1 << 2);
+ s[4] = h1 >> 6;
+ s[5] = h1 >> 14;
+ s[6] = (h1 >> 22) | (h2 << 3);
+ s[7] = h2 >> 5;
+ s[8] = h2 >> 13;
+ s[9] = (h2 >> 21) | (h3 << 5);
+ s[10] = h3 >> 3;
+ s[11] = h3 >> 11;
+ s[12] = (h3 >> 19) | (h4 << 6);
+ s[13] = h4 >> 2;
+ s[14] = h4 >> 10;
+ s[15] = h4 >> 18;
+ s[16] = h5 >> 0;
+ s[17] = h5 >> 8;
+ s[18] = h5 >> 16;
+ s[19] = (h5 >> 24) | (h6 << 1);
+ s[20] = h6 >> 7;
+ s[21] = h6 >> 15;
+ s[22] = (h6 >> 23) | (h7 << 3);
+ s[23] = h7 >> 5;
+ s[24] = h7 >> 13;
+ s[25] = (h7 >> 21) | (h8 << 4);
+ s[26] = h8 >> 4;
+ s[27] = h8 >> 12;
+ s[28] = (h8 >> 20) | (h9 << 6);
+ s[29] = h9 >> 2;
+ s[30] = h9 >> 10;
+ s[31] = h9 >> 18;
+}
+
+void fe_invert(fe out,fe z)
+{
+ fe t0;
+ fe t1;
+ fe t2;
+ fe t3;
+ int i;
+
+
+/* qhasm: fe z1 */
+
+/* qhasm: fe z2 */
+
+/* qhasm: fe z8 */
+
+/* qhasm: fe z9 */
+
+/* qhasm: fe z11 */
+
+/* qhasm: fe z22 */
+
+/* qhasm: fe z_5_0 */
+
+/* qhasm: fe z_10_5 */
+
+/* qhasm: fe z_10_0 */
+
+/* qhasm: fe z_20_10 */
+
+/* qhasm: fe z_20_0 */
+
+/* qhasm: fe z_40_20 */
+
+/* qhasm: fe z_40_0 */
+
+/* qhasm: fe z_50_10 */
+
+/* qhasm: fe z_50_0 */
+
+/* qhasm: fe z_100_50 */
+
+/* qhasm: fe z_100_0 */
+
+/* qhasm: fe z_200_100 */
+
+/* qhasm: fe z_200_0 */
+
+/* qhasm: fe z_250_50 */
+
+/* qhasm: fe z_250_0 */
+
+/* qhasm: fe z_255_5 */
+
+/* qhasm: fe z_255_21 */
+
+/* qhasm: enter pow225521 */
+
+/* qhasm: z2 = z1^2^1 */
+/* asm 1: fe_sq(>z2=fe#1,z2=fe#1,>z2=fe#1); */
+/* asm 2: fe_sq(>z2=t0,z2=t0,>z2=t0); */
+fe_sq(t0,z); for (i = 1;i < 1;++i) fe_sq(t0,t0);
+
+/* qhasm: z8 = z2^2^2 */
+/* asm 1: fe_sq(>z8=fe#2,z8=fe#2,>z8=fe#2); */
+/* asm 2: fe_sq(>z8=t1,z8=t1,>z8=t1); */
+fe_sq(t1,t0); for (i = 1;i < 2;++i) fe_sq(t1,t1);
+
+/* qhasm: z9 = z1*z8 */
+/* asm 1: fe_mul(>z9=fe#2,z9=t1,z11=fe#1,z11=t0,z22=fe#3,z22=fe#3,>z22=fe#3); */
+/* asm 2: fe_sq(>z22=t2,z22=t2,>z22=t2); */
+fe_sq(t2,t0); for (i = 1;i < 1;++i) fe_sq(t2,t2);
+
+/* qhasm: z_5_0 = z9*z22 */
+/* asm 1: fe_mul(>z_5_0=fe#2,z_5_0=t1,z_10_5=fe#3,z_10_5=fe#3,>z_10_5=fe#3); */
+/* asm 2: fe_sq(>z_10_5=t2,z_10_5=t2,>z_10_5=t2); */
+fe_sq(t2,t1); for (i = 1;i < 5;++i) fe_sq(t2,t2);
+
+/* qhasm: z_10_0 = z_10_5*z_5_0 */
+/* asm 1: fe_mul(>z_10_0=fe#2,z_10_0=t1,z_20_10=fe#3,z_20_10=fe#3,>z_20_10=fe#3); */
+/* asm 2: fe_sq(>z_20_10=t2,z_20_10=t2,>z_20_10=t2); */
+fe_sq(t2,t1); for (i = 1;i < 10;++i) fe_sq(t2,t2);
+
+/* qhasm: z_20_0 = z_20_10*z_10_0 */
+/* asm 1: fe_mul(>z_20_0=fe#3,z_20_0=t2,z_40_20=fe#4,z_40_20=fe#4,>z_40_20=fe#4); */
+/* asm 2: fe_sq(>z_40_20=t3,z_40_20=t3,>z_40_20=t3); */
+fe_sq(t3,t2); for (i = 1;i < 20;++i) fe_sq(t3,t3);
+
+/* qhasm: z_40_0 = z_40_20*z_20_0 */
+/* asm 1: fe_mul(>z_40_0=fe#3,z_40_0=t2,z_50_10=fe#3,z_50_10=fe#3,>z_50_10=fe#3); */
+/* asm 2: fe_sq(>z_50_10=t2,z_50_10=t2,>z_50_10=t2); */
+fe_sq(t2,t2); for (i = 1;i < 10;++i) fe_sq(t2,t2);
+
+/* qhasm: z_50_0 = z_50_10*z_10_0 */
+/* asm 1: fe_mul(>z_50_0=fe#2,z_50_0=t1,z_100_50=fe#3,z_100_50=fe#3,>z_100_50=fe#3); */
+/* asm 2: fe_sq(>z_100_50=t2,z_100_50=t2,>z_100_50=t2); */
+fe_sq(t2,t1); for (i = 1;i < 50;++i) fe_sq(t2,t2);
+
+/* qhasm: z_100_0 = z_100_50*z_50_0 */
+/* asm 1: fe_mul(>z_100_0=fe#3,z_100_0=t2,z_200_100=fe#4,z_200_100=fe#4,>z_200_100=fe#4); */
+/* asm 2: fe_sq(>z_200_100=t3,z_200_100=t3,>z_200_100=t3); */
+fe_sq(t3,t2); for (i = 1;i < 100;++i) fe_sq(t3,t3);
+
+/* qhasm: z_200_0 = z_200_100*z_100_0 */
+/* asm 1: fe_mul(>z_200_0=fe#3,z_200_0=t2,z_250_50=fe#3,z_250_50=fe#3,>z_250_50=fe#3); */
+/* asm 2: fe_sq(>z_250_50=t2,z_250_50=t2,>z_250_50=t2); */
+fe_sq(t2,t2); for (i = 1;i < 50;++i) fe_sq(t2,t2);
+
+/* qhasm: z_250_0 = z_250_50*z_50_0 */
+/* asm 1: fe_mul(>z_250_0=fe#2,z_250_0=t1,z_255_5=fe#2,z_255_5=fe#2,>z_255_5=fe#2); */
+/* asm 2: fe_sq(>z_255_5=t1,z_255_5=t1,>z_255_5=t1); */
+fe_sq(t1,t1); for (i = 1;i < 5;++i) fe_sq(t1,t1);
+
+/* qhasm: z_255_21 = z_255_5*z11 */
+/* asm 1: fe_mul(>z_255_21=fe#12,z_255_21=out,= 0;--pos) {
+ b = e[pos / 8] >> (pos & 7);
+ b &= 1;
+ swap ^= b;
+ fe_cswap(x2,x3,swap);
+ fe_cswap(z2,z3,swap);
+ swap = b;
+/* qhasm: fe X2 */
+
+/* qhasm: fe Z2 */
+
+/* qhasm: fe X3 */
+
+/* qhasm: fe Z3 */
+
+/* qhasm: fe X4 */
+
+/* qhasm: fe Z4 */
+
+/* qhasm: fe X5 */
+
+/* qhasm: fe Z5 */
+
+/* qhasm: fe A */
+
+/* qhasm: fe B */
+
+/* qhasm: fe C */
+
+/* qhasm: fe D */
+
+/* qhasm: fe E */
+
+/* qhasm: fe AA */
+
+/* qhasm: fe BB */
+
+/* qhasm: fe DA */
+
+/* qhasm: fe CB */
+
+/* qhasm: fe t0 */
+
+/* qhasm: fe t1 */
+
+/* qhasm: fe t2 */
+
+/* qhasm: fe t3 */
+
+/* qhasm: fe t4 */
+
+/* qhasm: enter ladder */
+
+/* qhasm: D = X3-Z3 */
+/* asm 1: fe_sub(>D=fe#5,D=tmp0,B=fe#6,B=tmp1,A=fe#1,A=x2,C=fe#2,C=z2,DA=fe#4,DA=z3,CB=fe#2,CB=z2,BB=fe#5,BB=tmp0,AA=fe#6,AA=tmp1,t0=fe#3,t0=x3,t1=fe#2,t1=z2,X4=fe#1,X4=x2,E=fe#6,E=tmp1,t2=fe#2,t2=z2,t3=fe#4,t3=z3,X5=fe#3,X5=x3,t4=fe#5,t4=tmp0,Z5=fe#4,x1,Z5=z3,x1,Z4=fe#2,Z4=z2,
+
+typedef unsigned char ed25519_signature[64];   /* (R,S) signature pair */
+typedef unsigned char ed25519_public_key[32];
+typedef unsigned char ed25519_secret_key[32];
+
+typedef unsigned char curved25519_key[32];     /* curve25519 scalar or point */
+
+/* Derive the public key pk from the secret key sk. */
+void ed25519_publickey(const ed25519_secret_key sk, ed25519_public_key pk);
+/* Verify signature RS over message m of mlen bytes under public key pk.
+   NOTE(review): return convention (0 = valid?) is not visible in this
+   header -- confirm against the implementation before relying on it. */
+int ed25519_sign_open(const unsigned char *m, size_t mlen, const ed25519_public_key pk, const ed25519_signature RS);
+/* Sign message m of mlen bytes with (sk, pk), writing the signature to RS. */
+void ed25519_sign(const unsigned char *m, size_t mlen, const ed25519_secret_key sk, const ed25519_public_key pk, ed25519_signature RS);
+
+/* Batch-verify num signatures; per-message results are written to valid[].
+   NOTE(review): aggregate return semantics not visible here -- confirm. */
+int ed25519_sign_open_batch(const unsigned char **m, size_t *mlen, const unsigned char **pk, const unsigned char **RS, size_t num, int *valid);
+
+/* Fill out with count bytes of randomness.  The "_unsafe" suffix suggests
+   this RNG is not hardened for key generation -- see upstream docs. */
+void ed25519_randombytes_unsafe(void *out, size_t count);
+
+/* Presumably computes the curve25519 scalar multiple of the basepoint by
+   scalar e -- TODO(review): confirm against the implementation. */
+void curved25519_scalarmult_basepoint(curved25519_key pk, const curved25519_key e);
+
+/* SSE2 variants of the API above; compiled only when ED25519_SSE2 is set. */
+#if defined(ED25519_SSE2)
+void ed25519_publickey_sse2(const ed25519_secret_key sk, ed25519_public_key pk);
+int ed25519_sign_open_sse2(const unsigned char *m, size_t mlen, const ed25519_public_key pk, const ed25519_signature RS);
+void ed25519_sign_sse2(const unsigned char *m, size_t mlen, const ed25519_secret_key sk, const ed25519_public_key pk, ed25519_signature RS);
+
+int ed25519_sign_open_batch_sse2(const unsigned char **m, size_t *mlen, const unsigned char **pk, const unsigned char **RS, size_t num, int *valid);
+
+void ed25519_randombytes_unsafe_sse2(void *out, size_t count);
+
+void curved25519_scalarmult_basepoint_sse2(curved25519_key pk, const curved25519_key e);
+#endif
+
+#endif // ED25519_H
diff --git a/src/ext/ed25519/donna/fuzz/ed25519-ref10.c b/src/ext/ed25519/donna/fuzz/ed25519-ref10.c
new file mode 100644
index 0000000000..a8e802df29
--- /dev/null
+++ b/src/ext/ed25519/donna/fuzz/ed25519-ref10.c
@@ -0,0 +1,4647 @@
+#include <stdint.h>
+#include <stddef.h>
+#include <string.h>
+
/* Constant-time comparison of two 32-byte buffers.
   Returns 0 when x and y are equal and -1 otherwise.  All 32 bytes are
   always examined, so the running time does not depend on the contents. */
static int crypto_verify_32(const unsigned char *x,const unsigned char *y)
{
  unsigned int differentbits = 0;
  int i;

  for (i = 0; i < 32; i++)
    differentbits |= (unsigned int)(x[i] ^ y[i]);

  /* Branch-free mapping: 0 -> 0, any nonzero value -> -1. */
  return (1 & ((differentbits - 1) >> 8)) - 1;
}
+
+#if defined(ED25519_REFHASH)
+
+/* reference/slow SHA-512. really, do not use this */
+
+#define HASH_BLOCK_SIZE 128
+#define HASH_DIGEST_SIZE 64
+
+/* Streaming SHA-512 state. */
+typedef struct sha512_state_t {
+ uint64_t H[8]; /* chaining value */
+ uint64_t T[2]; /* 128-bit count of bits processed (T[0] low, T[1] high) */
+ uint32_t leftover; /* bytes of a partial block held in buffer */
+ uint8_t buffer[HASH_BLOCK_SIZE];
+} sha512_state;
+
+typedef sha512_state ed25519_hash_context;
+
+/* SHA-512 round constants K[0..79] (FIPS 180-4, section 4.2.3). */
+static const uint64_t sha512_constants[80] = {
+	0x428a2f98d728ae22ull, 0x7137449123ef65cdull, 0xb5c0fbcfec4d3b2full, 0xe9b5dba58189dbbcull,
+	0x3956c25bf348b538ull, 0x59f111f1b605d019ull, 0x923f82a4af194f9bull, 0xab1c5ed5da6d8118ull,
+	0xd807aa98a3030242ull, 0x12835b0145706fbeull, 0x243185be4ee4b28cull, 0x550c7dc3d5ffb4e2ull,
+	0x72be5d74f27b896full, 0x80deb1fe3b1696b1ull, 0x9bdc06a725c71235ull, 0xc19bf174cf692694ull,
+	0xe49b69c19ef14ad2ull, 0xefbe4786384f25e3ull, 0x0fc19dc68b8cd5b5ull, 0x240ca1cc77ac9c65ull,
+	0x2de92c6f592b0275ull, 0x4a7484aa6ea6e483ull, 0x5cb0a9dcbd41fbd4ull, 0x76f988da831153b5ull,
+	0x983e5152ee66dfabull, 0xa831c66d2db43210ull, 0xb00327c898fb213full, 0xbf597fc7beef0ee4ull,
+	0xc6e00bf33da88fc2ull, 0xd5a79147930aa725ull, 0x06ca6351e003826full, 0x142929670a0e6e70ull,
+	0x27b70a8546d22ffcull, 0x2e1b21385c26c926ull, 0x4d2c6dfc5ac42aedull, 0x53380d139d95b3dfull,
+	0x650a73548baf63deull, 0x766a0abb3c77b2a8ull, 0x81c2c92e47edaee6ull, 0x92722c851482353bull,
+	0xa2bfe8a14cf10364ull, 0xa81a664bbc423001ull, 0xc24b8b70d0f89791ull, 0xc76c51a30654be30ull,
+	0xd192e819d6ef5218ull, 0xd69906245565a910ull, 0xf40e35855771202aull, 0x106aa07032bbd1b8ull,
+	0x19a4c116b8d2d0c8ull, 0x1e376c085141ab53ull, 0x2748774cdf8eeb99ull, 0x34b0bcb5e19b48a8ull,
+	0x391c0cb3c5c95a63ull, 0x4ed8aa4ae3418acbull, 0x5b9cca4f7763e373ull, 0x682e6ff3d6b2b8a3ull,
+	0x748f82ee5defb2fcull, 0x78a5636f43172f60ull, 0x84c87814a1f0ab72ull, 0x8cc702081a6439ecull,
+	0x90befffa23631e28ull, 0xa4506cebde82bde9ull, 0xbef9a3f7b2c67915ull, 0xc67178f2e372532bull,
+	0xca273eceea26619cull, 0xd186b8c721c0c207ull, 0xeada7dd6cde0eb1eull, 0xf57d4f7fee6ed178ull,
+	0x06f067aa72176fbaull, 0x0a637dc5a2c898a6ull, 0x113f9804bef90daeull, 0x1b710b35131c471bull,
+	0x28db77f523047d84ull, 0x32caab7b40c72493ull, 0x3c9ebe0a15c9bebcull, 0x431d67c49c100d4cull,
+	0x4cc5d4becb3e42b6ull, 0x597f299cfc657e2aull, 0x5fcb6fab3ad6faecull, 0x6c44198c4a475817ull
+};
+
+static uint64_t
+sha512_ROTR64(uint64_t x, int k) {
+ return (x >> k) | (x << (64 - k));
+}
+
+static uint64_t
+sha512_LOAD64_BE(const uint8_t *p) {
+ return
+ ((uint64_t)p[0] << 56) |
+ ((uint64_t)p[1] << 48) |
+ ((uint64_t)p[2] << 40) |
+ ((uint64_t)p[3] << 32) |
+ ((uint64_t)p[4] << 24) |
+ ((uint64_t)p[5] << 16) |
+ ((uint64_t)p[6] << 8) |
+ ((uint64_t)p[7] );
+}
+
+static void
+sha512_STORE64_BE(uint8_t *p, uint64_t v) {
+ p[0] = (uint8_t)(v >> 56);
+ p[1] = (uint8_t)(v >> 48);
+ p[2] = (uint8_t)(v >> 40);
+ p[3] = (uint8_t)(v >> 32);
+ p[4] = (uint8_t)(v >> 24);
+ p[5] = (uint8_t)(v >> 16);
+ p[6] = (uint8_t)(v >> 8);
+ p[7] = (uint8_t)(v );
+}
+
+/* SHA-512 logical functions and sigma rotations (FIPS 180-4, sec 4.1.3). */
+#define Ch(x,y,z) (z ^ (x & (y ^ z)))
+#define Maj(x,y,z) (((x | y) & z) | (x & y))
+#define S0(x) (sha512_ROTR64(x, 28) ^ sha512_ROTR64(x, 34) ^ sha512_ROTR64(x, 39))
+#define S1(x) (sha512_ROTR64(x, 14) ^ sha512_ROTR64(x, 18) ^ sha512_ROTR64(x, 41))
+#define G0(x) (sha512_ROTR64(x, 1) ^ sha512_ROTR64(x, 8) ^ (x >> 7))
+#define G1(x) (sha512_ROTR64(x, 19) ^ sha512_ROTR64(x, 61) ^ (x >> 6))
+/* Message schedule: W0 loads big-endian input words, W1 expands w[16..79]. */
+#define W0(in,i) (sha512_LOAD64_BE(&in[i * 8]))
+#define W1(i) (G1(w[i - 2]) + w[i - 7] + G0(w[i - 15]) + w[i - 16])
+/* One compression round: rotates the working registers r[0..7].  Relies on
+   the caller's locals r[], w[], t0 and t1 (see sha512_blocks). */
+#define STEP(i) \
+	t1 = S0(r[0]) + Maj(r[0], r[1], r[2]); \
+	t0 = r[7] + S1(r[4]) + Ch(r[4], r[5], r[6]) + sha512_constants[i] + w[i]; \
+	r[7] = r[6]; \
+	r[6] = r[5]; \
+	r[5] = r[4]; \
+	r[4] = r[3] + t0; \
+	r[3] = r[2]; \
+	r[2] = r[1]; \
+	r[1] = r[0]; \
+	r[0] = t0 + t1;
+
+/* Compress `blocks` consecutive 128-byte blocks from `in` into state S,
+   advancing the 128-bit bit counter S->T as it goes. */
+static void
+sha512_blocks(sha512_state *S, const uint8_t *in, size_t blocks) {
+	uint64_t r[8], w[80], t0, t1;
+	size_t i;
+
+	for (i = 0; i < 8; i++) r[i] = S->H[i];
+
+	while (blocks--) {
+		for (i = 0; i < 16; i++) { w[i] = W0(in, i); }
+		for (i = 16; i < 80; i++) { w[i] = W1(i); }
+		for (i = 0; i < 80; i++) { STEP(i); }
+		for (i = 0; i < 8; i++) { r[i] += S->H[i]; S->H[i] = r[i]; } /* feed-forward */
+		S->T[0] += HASH_BLOCK_SIZE * 8; /* bit counter, low word */
+		S->T[1] += (!S->T[0]) ? 1 : 0; /* carry into high word on wrap */
+		in += HASH_BLOCK_SIZE;
+	}
+}
+
+static void
+ed25519_hash_init(sha512_state *S) {
+ S->H[0] = 0x6a09e667f3bcc908ull;
+ S->H[1] = 0xbb67ae8584caa73bull;
+ S->H[2] = 0x3c6ef372fe94f82bull;
+ S->H[3] = 0xa54ff53a5f1d36f1ull;
+ S->H[4] = 0x510e527fade682d1ull;
+ S->H[5] = 0x9b05688c2b3e6c1full;
+ S->H[6] = 0x1f83d9abfb41bd6bull;
+ S->H[7] = 0x5be0cd19137e2179ull;
+ S->T[0] = 0;
+ S->T[1] = 0;
+ S->leftover = 0;
+}
+
+/* Absorb inlen bytes from in into the hash state, buffering any partial
+   128-byte block in S->buffer for the next call. */
+static void
+ed25519_hash_update(sha512_state *S, const uint8_t *in, size_t inlen) {
+	size_t blocks, want;
+
+	/* handle the previous data */
+	if (S->leftover) {
+		want = (HASH_BLOCK_SIZE - S->leftover);
+		want = (want < inlen) ? want : inlen;
+		memcpy(S->buffer + S->leftover, in, want);
+		S->leftover += (uint32_t)want;
+		if (S->leftover < HASH_BLOCK_SIZE)
+			return;
+		in += want;
+		inlen -= want;
+		sha512_blocks(S, S->buffer, 1);
+	}
+
+	/* handle the current data */
+	blocks = (inlen & ~(HASH_BLOCK_SIZE - 1)); /* ok: block size is a power of two */
+	S->leftover = (uint32_t)(inlen - blocks);
+	if (blocks) {
+		sha512_blocks(S, in, blocks / HASH_BLOCK_SIZE);
+		in += blocks;
+	}
+
+	/* handle leftover data */
+	if (S->leftover)
+		memcpy(S->buffer, in, S->leftover);
+}
+
+/* Finish the hash: append the 0x80 pad byte, zero-fill, write the 128-bit
+   big-endian bit count, compress, and emit the 64-byte digest.  The final
+   length (t0, t1) is captured first because sha512_blocks advances S->T. */
+static void
+ed25519_hash_final(sha512_state *S, uint8_t *hash) {
+	uint64_t t0 = S->T[0] + (S->leftover * 8), t1 = S->T[1];
+
+	S->buffer[S->leftover] = 0x80;
+	if (S->leftover <= 111) {
+		memset(S->buffer + S->leftover + 1, 0, 111 - S->leftover);
+	} else {
+		/* no room for the 16-byte length field in this block: pad it out,
+		   compress, and use one extra zero block for the length */
+		memset(S->buffer + S->leftover + 1, 0, 127 - S->leftover);
+		sha512_blocks(S, S->buffer, 1);
+		memset(S->buffer, 0, 112);
+	}
+
+	sha512_STORE64_BE(S->buffer + 112, t1);
+	sha512_STORE64_BE(S->buffer + 120, t0);
+	sha512_blocks(S, S->buffer, 1);
+
+	sha512_STORE64_BE(&hash[ 0], S->H[0]);
+	sha512_STORE64_BE(&hash[ 8], S->H[1]);
+	sha512_STORE64_BE(&hash[16], S->H[2]);
+	sha512_STORE64_BE(&hash[24], S->H[3]);
+	sha512_STORE64_BE(&hash[32], S->H[4]);
+	sha512_STORE64_BE(&hash[40], S->H[5]);
+	sha512_STORE64_BE(&hash[48], S->H[6]);
+	sha512_STORE64_BE(&hash[56], S->H[7]);
+}
+
+/* One-shot SHA-512: hash (64 bytes) = SHA-512(in[0..inlen-1]). */
+static void
+crypto_hash_sha512(unsigned char *hash, const unsigned char *in, size_t inlen) {
+	ed25519_hash_context ctx;
+	ed25519_hash_init(&ctx);
+	ed25519_hash_update(&ctx, in, inlen);
+	ed25519_hash_final(&ctx, hash);
+}
+
+#else
+
+#include <openssl/sha.h>
+
+/* One-shot SHA-512 via OpenSSL's SHA512(). */
+static void
+crypto_hash_sha512(unsigned char *hash, const unsigned char *in, size_t inlen) {
+	SHA512(in, inlen, hash);
+}
+
+#endif
+
+
+
+
+typedef int32_t crypto_int32;
+typedef uint32_t crypto_uint32;
+typedef int64_t crypto_int64;
+typedef uint64_t crypto_uint64;
+
+/* Field element of GF(2^255-19): ten limbs in radix ~2^25.5 (limbs hold
+   alternately about 26 and 25 bits -- see the 26/25-bit carry chains in
+   fe_sq/fe_frombytes below). */
+typedef crypto_int32 fe[10];
+
+/*
+h = 0
+*/
+
+static void fe_0(fe h)
+{
+ h[0] = 0;
+ h[1] = 0;
+ h[2] = 0;
+ h[3] = 0;
+ h[4] = 0;
+ h[5] = 0;
+ h[6] = 0;
+ h[7] = 0;
+ h[8] = 0;
+ h[9] = 0;
+}
+
+/*
+h = 1
+*/
+
+static void fe_1(fe h)
+{
+ h[0] = 1;
+ h[1] = 0;
+ h[2] = 0;
+ h[3] = 0;
+ h[4] = 0;
+ h[5] = 0;
+ h[6] = 0;
+ h[7] = 0;
+ h[8] = 0;
+ h[9] = 0;
+}
+
+/*
+h = f + g
+Can overlap h with f or g.
+
+Preconditions:
+ |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+ |g| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+
+Postconditions:
+ |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+*/
+
+static void fe_add(fe h,const fe f,const fe g)
+{
+ crypto_int32 f0 = f[0];
+ crypto_int32 f1 = f[1];
+ crypto_int32 f2 = f[2];
+ crypto_int32 f3 = f[3];
+ crypto_int32 f4 = f[4];
+ crypto_int32 f5 = f[5];
+ crypto_int32 f6 = f[6];
+ crypto_int32 f7 = f[7];
+ crypto_int32 f8 = f[8];
+ crypto_int32 f9 = f[9];
+ crypto_int32 g0 = g[0];
+ crypto_int32 g1 = g[1];
+ crypto_int32 g2 = g[2];
+ crypto_int32 g3 = g[3];
+ crypto_int32 g4 = g[4];
+ crypto_int32 g5 = g[5];
+ crypto_int32 g6 = g[6];
+ crypto_int32 g7 = g[7];
+ crypto_int32 g8 = g[8];
+ crypto_int32 g9 = g[9];
+ crypto_int32 h0 = f0 + g0;
+ crypto_int32 h1 = f1 + g1;
+ crypto_int32 h2 = f2 + g2;
+ crypto_int32 h3 = f3 + g3;
+ crypto_int32 h4 = f4 + g4;
+ crypto_int32 h5 = f5 + g5;
+ crypto_int32 h6 = f6 + g6;
+ crypto_int32 h7 = f7 + g7;
+ crypto_int32 h8 = f8 + g8;
+ crypto_int32 h9 = f9 + g9;
+ h[0] = h0;
+ h[1] = h1;
+ h[2] = h2;
+ h[3] = h3;
+ h[4] = h4;
+ h[5] = h5;
+ h[6] = h6;
+ h[7] = h7;
+ h[8] = h8;
+ h[9] = h9;
+}
+
+
+/*
+Replace (f,g) with (g,g) if b == 1;
+replace (f,g) with (f,g) if b == 0.
+
+Preconditions: b in {0,1}.
+*/
+
+/* Constant-time conditional move: f <- g when b == 1, f unchanged when
+   b == 0 (b must be 0 or 1).  Selection uses a full-width mask instead of
+   a branch so timing does not depend on the secret bit b. */
+static void fe_cmov(fe f,const fe g,unsigned int b)
+{
+	crypto_int32 f0 = f[0];
+	crypto_int32 f1 = f[1];
+	crypto_int32 f2 = f[2];
+	crypto_int32 f3 = f[3];
+	crypto_int32 f4 = f[4];
+	crypto_int32 f5 = f[5];
+	crypto_int32 f6 = f[6];
+	crypto_int32 f7 = f[7];
+	crypto_int32 f8 = f[8];
+	crypto_int32 f9 = f[9];
+	crypto_int32 g0 = g[0];
+	crypto_int32 g1 = g[1];
+	crypto_int32 g2 = g[2];
+	crypto_int32 g3 = g[3];
+	crypto_int32 g4 = g[4];
+	crypto_int32 g5 = g[5];
+	crypto_int32 g6 = g[6];
+	crypto_int32 g7 = g[7];
+	crypto_int32 g8 = g[8];
+	crypto_int32 g9 = g[9];
+	crypto_int32 x0 = f0 ^ g0;
+	crypto_int32 x1 = f1 ^ g1;
+	crypto_int32 x2 = f2 ^ g2;
+	crypto_int32 x3 = f3 ^ g3;
+	crypto_int32 x4 = f4 ^ g4;
+	crypto_int32 x5 = f5 ^ g5;
+	crypto_int32 x6 = f6 ^ g6;
+	crypto_int32 x7 = f7 ^ g7;
+	crypto_int32 x8 = f8 ^ g8;
+	crypto_int32 x9 = f9 ^ g9;
+	b = -b; /* 0 -> all-zero mask, 1 -> all-one mask (unsigned wraparound) */
+	x0 &= b;
+	x1 &= b;
+	x2 &= b;
+	x3 &= b;
+	x4 &= b;
+	x5 &= b;
+	x6 &= b;
+	x7 &= b;
+	x8 &= b;
+	x9 &= b;
+	f[0] = f0 ^ x0;
+	f[1] = f1 ^ x1;
+	f[2] = f2 ^ x2;
+	f[3] = f3 ^ x3;
+	f[4] = f4 ^ x4;
+	f[5] = f5 ^ x5;
+	f[6] = f6 ^ x6;
+	f[7] = f7 ^ x7;
+	f[8] = f8 ^ x8;
+	f[9] = f9 ^ x9;
+}
+
+
+/*
+h = f
+*/
+
+static void fe_copy(fe h,const fe f)
+{
+ crypto_int32 f0 = f[0];
+ crypto_int32 f1 = f[1];
+ crypto_int32 f2 = f[2];
+ crypto_int32 f3 = f[3];
+ crypto_int32 f4 = f[4];
+ crypto_int32 f5 = f[5];
+ crypto_int32 f6 = f[6];
+ crypto_int32 f7 = f[7];
+ crypto_int32 f8 = f[8];
+ crypto_int32 f9 = f[9];
+ h[0] = f0;
+ h[1] = f1;
+ h[2] = f2;
+ h[3] = f3;
+ h[4] = f4;
+ h[5] = f5;
+ h[6] = f6;
+ h[7] = f7;
+ h[8] = f8;
+ h[9] = f9;
+}
+
+
+static crypto_uint64 load_3(const unsigned char *in)
+{
+ crypto_uint64 result;
+ result = (crypto_uint64) in[0];
+ result |= ((crypto_uint64) in[1]) << 8;
+ result |= ((crypto_uint64) in[2]) << 16;
+ return result;
+}
+
+static crypto_uint64 load_4(const unsigned char *in)
+{
+ crypto_uint64 result;
+ result = (crypto_uint64) in[0];
+ result |= ((crypto_uint64) in[1]) << 8;
+ result |= ((crypto_uint64) in[2]) << 16;
+ result |= ((crypto_uint64) in[3]) << 24;
+ return result;
+}
+
+/*
+Ignores top bit of h.
+*/
+
+/* Unpack a 32-byte little-endian encoding into ten limbs, then do one
+   carry pass so every limb lands in its normal range.  Bit 255 of s is
+   ignored (masked off when h9 is loaded). */
+static void fe_frombytes(fe h,const unsigned char *s)
+{
+	crypto_int64 h0 = load_4(s);
+	crypto_int64 h1 = load_3(s + 4) << 6;
+	crypto_int64 h2 = load_3(s + 7) << 5;
+	crypto_int64 h3 = load_3(s + 10) << 3;
+	crypto_int64 h4 = load_3(s + 13) << 2;
+	crypto_int64 h5 = load_4(s + 16);
+	crypto_int64 h6 = load_3(s + 20) << 7;
+	crypto_int64 h7 = load_3(s + 23) << 5;
+	crypto_int64 h8 = load_3(s + 26) << 4;
+	crypto_int64 h9 = (load_3(s + 29) & 8388607) << 2; /* mask = 2^23-1: drops bit 255 */
+	crypto_int64 carry0;
+	crypto_int64 carry1;
+	crypto_int64 carry2;
+	crypto_int64 carry3;
+	crypto_int64 carry4;
+	crypto_int64 carry5;
+	crypto_int64 carry6;
+	crypto_int64 carry7;
+	crypto_int64 carry8;
+	crypto_int64 carry9;
+
+	/* odd limbs first; the h9 carry wraps around times 19 (2^255 = 19 mod p) */
+	carry9 = (h9 + (crypto_int64) (1<<24)) >> 25; h0 += carry9 * 19; h9 -= carry9 << 25;
+	carry1 = (h1 + (crypto_int64) (1<<24)) >> 25; h2 += carry1; h1 -= carry1 << 25;
+	carry3 = (h3 + (crypto_int64) (1<<24)) >> 25; h4 += carry3; h3 -= carry3 << 25;
+	carry5 = (h5 + (crypto_int64) (1<<24)) >> 25; h6 += carry5; h5 -= carry5 << 25;
+	carry7 = (h7 + (crypto_int64) (1<<24)) >> 25; h8 += carry7; h7 -= carry7 << 25;
+
+	carry0 = (h0 + (crypto_int64) (1<<25)) >> 26; h1 += carry0; h0 -= carry0 << 26;
+	carry2 = (h2 + (crypto_int64) (1<<25)) >> 26; h3 += carry2; h2 -= carry2 << 26;
+	carry4 = (h4 + (crypto_int64) (1<<25)) >> 26; h5 += carry4; h4 -= carry4 << 26;
+	carry6 = (h6 + (crypto_int64) (1<<25)) >> 26; h7 += carry6; h6 -= carry6 << 26;
+	carry8 = (h8 + (crypto_int64) (1<<25)) >> 26; h9 += carry8; h8 -= carry8 << 26;
+
+	h[0] = h0;
+	h[1] = h1;
+	h[2] = h2;
+	h[3] = h3;
+	h[4] = h4;
+	h[5] = h5;
+	h[6] = h6;
+	h[7] = h7;
+	h[8] = h8;
+	h[9] = h9;
+}
+
+/*
+Preconditions:
+ |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+
+Write p=2^255-19; q=floor(h/p).
+Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
+
+Proof:
+ Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
+ Also have |h-2^230 h9|<2^231 so |19 2^(-255)(h-2^230 h9)|<1/4.
+
+ Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
+ Then 0> 25;
+ q = (h0 + q) >> 26;
+ q = (h1 + q) >> 25;
+ q = (h2 + q) >> 26;
+ q = (h3 + q) >> 25;
+ q = (h4 + q) >> 26;
+ q = (h5 + q) >> 25;
+ q = (h6 + q) >> 26;
+ q = (h7 + q) >> 25;
+ q = (h8 + q) >> 26;
+ q = (h9 + q) >> 25;
+
+ /* Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. */
+ h0 += 19 * q;
+ /* Goal: Output h-2^255 q, which is between 0 and 2^255-20. */
+
+ carry0 = h0 >> 26; h1 += carry0; h0 -= carry0 << 26;
+ carry1 = h1 >> 25; h2 += carry1; h1 -= carry1 << 25;
+ carry2 = h2 >> 26; h3 += carry2; h2 -= carry2 << 26;
+ carry3 = h3 >> 25; h4 += carry3; h3 -= carry3 << 25;
+ carry4 = h4 >> 26; h5 += carry4; h4 -= carry4 << 26;
+ carry5 = h5 >> 25; h6 += carry5; h5 -= carry5 << 25;
+ carry6 = h6 >> 26; h7 += carry6; h6 -= carry6 << 26;
+ carry7 = h7 >> 25; h8 += carry7; h7 -= carry7 << 25;
+ carry8 = h8 >> 26; h9 += carry8; h8 -= carry8 << 26;
+ carry9 = h9 >> 25; h9 -= carry9 << 25;
+ /* h10 = carry9 */
+
+ /*
+ Goal: Output h0+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
+ Have h0+...+2^230 h9 between 0 and 2^255-1;
+ evidently 2^255 h10-2^255 q = 0.
+ Goal: Output h0+...+2^230 h9.
+ */
+
+ s[0] = h0 >> 0;
+ s[1] = h0 >> 8;
+ s[2] = h0 >> 16;
+ s[3] = (h0 >> 24) | (h1 << 2);
+ s[4] = h1 >> 6;
+ s[5] = h1 >> 14;
+ s[6] = (h1 >> 22) | (h2 << 3);
+ s[7] = h2 >> 5;
+ s[8] = h2 >> 13;
+ s[9] = (h2 >> 21) | (h3 << 5);
+ s[10] = h3 >> 3;
+ s[11] = h3 >> 11;
+ s[12] = (h3 >> 19) | (h4 << 6);
+ s[13] = h4 >> 2;
+ s[14] = h4 >> 10;
+ s[15] = h4 >> 18;
+ s[16] = h5 >> 0;
+ s[17] = h5 >> 8;
+ s[18] = h5 >> 16;
+ s[19] = (h5 >> 24) | (h6 << 1);
+ s[20] = h6 >> 7;
+ s[21] = h6 >> 15;
+ s[22] = (h6 >> 23) | (h7 << 3);
+ s[23] = h7 >> 5;
+ s[24] = h7 >> 13;
+ s[25] = (h7 >> 21) | (h8 << 4);
+ s[26] = h8 >> 4;
+ s[27] = h8 >> 12;
+ s[28] = (h8 >> 20) | (h9 << 6);
+ s[29] = h9 >> 2;
+ s[30] = h9 >> 10;
+ s[31] = h9 >> 18;
+}
+
+
+/*
+h = f - g
+Can overlap h with f or g.
+
+Preconditions:
+ |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+ |g| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+
+Postconditions:
+ |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+*/
+
+static void fe_sub(fe h,const fe f,const fe g)
+{
+ crypto_int32 f0 = f[0];
+ crypto_int32 f1 = f[1];
+ crypto_int32 f2 = f[2];
+ crypto_int32 f3 = f[3];
+ crypto_int32 f4 = f[4];
+ crypto_int32 f5 = f[5];
+ crypto_int32 f6 = f[6];
+ crypto_int32 f7 = f[7];
+ crypto_int32 f8 = f[8];
+ crypto_int32 f9 = f[9];
+ crypto_int32 g0 = g[0];
+ crypto_int32 g1 = g[1];
+ crypto_int32 g2 = g[2];
+ crypto_int32 g3 = g[3];
+ crypto_int32 g4 = g[4];
+ crypto_int32 g5 = g[5];
+ crypto_int32 g6 = g[6];
+ crypto_int32 g7 = g[7];
+ crypto_int32 g8 = g[8];
+ crypto_int32 g9 = g[9];
+ crypto_int32 h0 = f0 - g0;
+ crypto_int32 h1 = f1 - g1;
+ crypto_int32 h2 = f2 - g2;
+ crypto_int32 h3 = f3 - g3;
+ crypto_int32 h4 = f4 - g4;
+ crypto_int32 h5 = f5 - g5;
+ crypto_int32 h6 = f6 - g6;
+ crypto_int32 h7 = f7 - g7;
+ crypto_int32 h8 = f8 - g8;
+ crypto_int32 h9 = f9 - g9;
+ h[0] = h0;
+ h[1] = h1;
+ h[2] = h2;
+ h[3] = h3;
+ h[4] = h4;
+ h[5] = h5;
+ h[6] = h6;
+ h[7] = h7;
+ h[8] = h8;
+ h[9] = h9;
+}
+
+
+/*
+h = f * f
+Can overlap h with f.
+
+Preconditions:
+ |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc.
+
+Postconditions:
+ |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc.
+*/
+
+/*
+See fe_mul.c for discussion of implementation strategy.
+*/
+
+/* NOTE(review): the precomputations, the exact carry schedule and its
+   ordering below are deliberate (bounds tracking, branch-free execution);
+   do not reorder or "simplify". */
+static void fe_sq(fe h,const fe f)
+{
+	crypto_int32 f0 = f[0];
+	crypto_int32 f1 = f[1];
+	crypto_int32 f2 = f[2];
+	crypto_int32 f3 = f[3];
+	crypto_int32 f4 = f[4];
+	crypto_int32 f5 = f[5];
+	crypto_int32 f6 = f[6];
+	crypto_int32 f7 = f[7];
+	crypto_int32 f8 = f[8];
+	crypto_int32 f9 = f[9];
+	crypto_int32 f0_2 = 2 * f0;
+	crypto_int32 f1_2 = 2 * f1;
+	crypto_int32 f2_2 = 2 * f2;
+	crypto_int32 f3_2 = 2 * f3;
+	crypto_int32 f4_2 = 2 * f4;
+	crypto_int32 f5_2 = 2 * f5;
+	crypto_int32 f6_2 = 2 * f6;
+	crypto_int32 f7_2 = 2 * f7;
+	crypto_int32 f5_38 = 38 * f5; /* 1.959375*2^30 */
+	crypto_int32 f6_19 = 19 * f6; /* 1.959375*2^30 */
+	crypto_int32 f7_38 = 38 * f7; /* 1.959375*2^30 */
+	crypto_int32 f8_19 = 19 * f8; /* 1.959375*2^30 */
+	crypto_int32 f9_38 = 38 * f9; /* 1.959375*2^30 */
+	crypto_int64 f0f0 = f0 * (crypto_int64) f0;
+	crypto_int64 f0f1_2 = f0_2 * (crypto_int64) f1;
+	crypto_int64 f0f2_2 = f0_2 * (crypto_int64) f2;
+	crypto_int64 f0f3_2 = f0_2 * (crypto_int64) f3;
+	crypto_int64 f0f4_2 = f0_2 * (crypto_int64) f4;
+	crypto_int64 f0f5_2 = f0_2 * (crypto_int64) f5;
+	crypto_int64 f0f6_2 = f0_2 * (crypto_int64) f6;
+	crypto_int64 f0f7_2 = f0_2 * (crypto_int64) f7;
+	crypto_int64 f0f8_2 = f0_2 * (crypto_int64) f8;
+	crypto_int64 f0f9_2 = f0_2 * (crypto_int64) f9;
+	crypto_int64 f1f1_2 = f1_2 * (crypto_int64) f1;
+	crypto_int64 f1f2_2 = f1_2 * (crypto_int64) f2;
+	crypto_int64 f1f3_4 = f1_2 * (crypto_int64) f3_2;
+	crypto_int64 f1f4_2 = f1_2 * (crypto_int64) f4;
+	crypto_int64 f1f5_4 = f1_2 * (crypto_int64) f5_2;
+	crypto_int64 f1f6_2 = f1_2 * (crypto_int64) f6;
+	crypto_int64 f1f7_4 = f1_2 * (crypto_int64) f7_2;
+	crypto_int64 f1f8_2 = f1_2 * (crypto_int64) f8;
+	crypto_int64 f1f9_76 = f1_2 * (crypto_int64) f9_38;
+	crypto_int64 f2f2 = f2 * (crypto_int64) f2;
+	crypto_int64 f2f3_2 = f2_2 * (crypto_int64) f3;
+	crypto_int64 f2f4_2 = f2_2 * (crypto_int64) f4;
+	crypto_int64 f2f5_2 = f2_2 * (crypto_int64) f5;
+	crypto_int64 f2f6_2 = f2_2 * (crypto_int64) f6;
+	crypto_int64 f2f7_2 = f2_2 * (crypto_int64) f7;
+	crypto_int64 f2f8_38 = f2_2 * (crypto_int64) f8_19;
+	crypto_int64 f2f9_38 = f2 * (crypto_int64) f9_38;
+	crypto_int64 f3f3_2 = f3_2 * (crypto_int64) f3;
+	crypto_int64 f3f4_2 = f3_2 * (crypto_int64) f4;
+	crypto_int64 f3f5_4 = f3_2 * (crypto_int64) f5_2;
+	crypto_int64 f3f6_2 = f3_2 * (crypto_int64) f6;
+	crypto_int64 f3f7_76 = f3_2 * (crypto_int64) f7_38;
+	crypto_int64 f3f8_38 = f3_2 * (crypto_int64) f8_19;
+	crypto_int64 f3f9_76 = f3_2 * (crypto_int64) f9_38;
+	crypto_int64 f4f4 = f4 * (crypto_int64) f4;
+	crypto_int64 f4f5_2 = f4_2 * (crypto_int64) f5;
+	crypto_int64 f4f6_38 = f4_2 * (crypto_int64) f6_19;
+	crypto_int64 f4f7_38 = f4 * (crypto_int64) f7_38;
+	crypto_int64 f4f8_38 = f4_2 * (crypto_int64) f8_19;
+	crypto_int64 f4f9_38 = f4 * (crypto_int64) f9_38;
+	crypto_int64 f5f5_38 = f5 * (crypto_int64) f5_38;
+	crypto_int64 f5f6_38 = f5_2 * (crypto_int64) f6_19;
+	crypto_int64 f5f7_76 = f5_2 * (crypto_int64) f7_38;
+	crypto_int64 f5f8_38 = f5_2 * (crypto_int64) f8_19;
+	crypto_int64 f5f9_76 = f5_2 * (crypto_int64) f9_38;
+	crypto_int64 f6f6_19 = f6 * (crypto_int64) f6_19;
+	crypto_int64 f6f7_38 = f6 * (crypto_int64) f7_38;
+	crypto_int64 f6f8_38 = f6_2 * (crypto_int64) f8_19;
+	crypto_int64 f6f9_38 = f6 * (crypto_int64) f9_38;
+	crypto_int64 f7f7_38 = f7 * (crypto_int64) f7_38;
+	crypto_int64 f7f8_38 = f7_2 * (crypto_int64) f8_19;
+	crypto_int64 f7f9_76 = f7_2 * (crypto_int64) f9_38;
+	crypto_int64 f8f8_19 = f8 * (crypto_int64) f8_19;
+	crypto_int64 f8f9_38 = f8 * (crypto_int64) f9_38;
+	crypto_int64 f9f9_38 = f9 * (crypto_int64) f9_38;
+	crypto_int64 h0 = f0f0 +f1f9_76+f2f8_38+f3f7_76+f4f6_38+f5f5_38;
+	crypto_int64 h1 = f0f1_2+f2f9_38+f3f8_38+f4f7_38+f5f6_38;
+	crypto_int64 h2 = f0f2_2+f1f1_2 +f3f9_76+f4f8_38+f5f7_76+f6f6_19;
+	crypto_int64 h3 = f0f3_2+f1f2_2 +f4f9_38+f5f8_38+f6f7_38;
+	crypto_int64 h4 = f0f4_2+f1f3_4 +f2f2 +f5f9_76+f6f8_38+f7f7_38;
+	crypto_int64 h5 = f0f5_2+f1f4_2 +f2f3_2 +f6f9_38+f7f8_38;
+	crypto_int64 h6 = f0f6_2+f1f5_4 +f2f4_2 +f3f3_2 +f7f9_76+f8f8_19;
+	crypto_int64 h7 = f0f7_2+f1f6_2 +f2f5_2 +f3f4_2 +f8f9_38;
+	crypto_int64 h8 = f0f8_2+f1f7_4 +f2f6_2 +f3f5_4 +f4f4 +f9f9_38;
+	crypto_int64 h9 = f0f9_2+f1f8_2 +f2f7_2 +f3f6_2 +f4f5_2;
+	crypto_int64 carry0;
+	crypto_int64 carry1;
+	crypto_int64 carry2;
+	crypto_int64 carry3;
+	crypto_int64 carry4;
+	crypto_int64 carry5;
+	crypto_int64 carry6;
+	crypto_int64 carry7;
+	crypto_int64 carry8;
+	crypto_int64 carry9;
+
+	/* interleaved 26/25-bit carry chain; the h9 carry wraps into h0 * 19 */
+	carry0 = (h0 + (crypto_int64) (1<<25)) >> 26; h1 += carry0; h0 -= carry0 << 26;
+	carry4 = (h4 + (crypto_int64) (1<<25)) >> 26; h5 += carry4; h4 -= carry4 << 26;
+
+	carry1 = (h1 + (crypto_int64) (1<<24)) >> 25; h2 += carry1; h1 -= carry1 << 25;
+	carry5 = (h5 + (crypto_int64) (1<<24)) >> 25; h6 += carry5; h5 -= carry5 << 25;
+
+	carry2 = (h2 + (crypto_int64) (1<<25)) >> 26; h3 += carry2; h2 -= carry2 << 26;
+	carry6 = (h6 + (crypto_int64) (1<<25)) >> 26; h7 += carry6; h6 -= carry6 << 26;
+
+	carry3 = (h3 + (crypto_int64) (1<<24)) >> 25; h4 += carry3; h3 -= carry3 << 25;
+	carry7 = (h7 + (crypto_int64) (1<<24)) >> 25; h8 += carry7; h7 -= carry7 << 25;
+
+	carry4 = (h4 + (crypto_int64) (1<<25)) >> 26; h5 += carry4; h4 -= carry4 << 26;
+	carry8 = (h8 + (crypto_int64) (1<<25)) >> 26; h9 += carry8; h8 -= carry8 << 26;
+
+	carry9 = (h9 + (crypto_int64) (1<<24)) >> 25; h0 += carry9 * 19; h9 -= carry9 << 25;
+
+	carry0 = (h0 + (crypto_int64) (1<<25)) >> 26; h1 += carry0; h0 -= carry0 << 26;
+
+	h[0] = h0;
+	h[1] = h1;
+	h[2] = h2;
+	h[3] = h3;
+	h[4] = h4;
+	h[5] = h5;
+	h[6] = h6;
+	h[7] = h7;
+	h[8] = h8;
+	h[9] = h9;
+}
+
+/*
+h = 2 * f * f
+Can overlap h with f.
+
+Preconditions:
+ |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc.
+
+Postconditions:
+ |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc.
+*/
+
+/*
+See fe_mul.c for discussion of implementation strategy.
+*/
+
+/* Same product schedule as fe_sq, but the limbs are doubled (h += h)
+   before the carry chain, giving h = 2*f*f.  NOTE(review): statement
+   order and the carry schedule are deliberate; do not reorder. */
+static void fe_sq2(fe h,const fe f)
+{
+	crypto_int32 f0 = f[0];
+	crypto_int32 f1 = f[1];
+	crypto_int32 f2 = f[2];
+	crypto_int32 f3 = f[3];
+	crypto_int32 f4 = f[4];
+	crypto_int32 f5 = f[5];
+	crypto_int32 f6 = f[6];
+	crypto_int32 f7 = f[7];
+	crypto_int32 f8 = f[8];
+	crypto_int32 f9 = f[9];
+	crypto_int32 f0_2 = 2 * f0;
+	crypto_int32 f1_2 = 2 * f1;
+	crypto_int32 f2_2 = 2 * f2;
+	crypto_int32 f3_2 = 2 * f3;
+	crypto_int32 f4_2 = 2 * f4;
+	crypto_int32 f5_2 = 2 * f5;
+	crypto_int32 f6_2 = 2 * f6;
+	crypto_int32 f7_2 = 2 * f7;
+	crypto_int32 f5_38 = 38 * f5; /* 1.959375*2^30 */
+	crypto_int32 f6_19 = 19 * f6; /* 1.959375*2^30 */
+	crypto_int32 f7_38 = 38 * f7; /* 1.959375*2^30 */
+	crypto_int32 f8_19 = 19 * f8; /* 1.959375*2^30 */
+	crypto_int32 f9_38 = 38 * f9; /* 1.959375*2^30 */
+	crypto_int64 f0f0 = f0 * (crypto_int64) f0;
+	crypto_int64 f0f1_2 = f0_2 * (crypto_int64) f1;
+	crypto_int64 f0f2_2 = f0_2 * (crypto_int64) f2;
+	crypto_int64 f0f3_2 = f0_2 * (crypto_int64) f3;
+	crypto_int64 f0f4_2 = f0_2 * (crypto_int64) f4;
+	crypto_int64 f0f5_2 = f0_2 * (crypto_int64) f5;
+	crypto_int64 f0f6_2 = f0_2 * (crypto_int64) f6;
+	crypto_int64 f0f7_2 = f0_2 * (crypto_int64) f7;
+	crypto_int64 f0f8_2 = f0_2 * (crypto_int64) f8;
+	crypto_int64 f0f9_2 = f0_2 * (crypto_int64) f9;
+	crypto_int64 f1f1_2 = f1_2 * (crypto_int64) f1;
+	crypto_int64 f1f2_2 = f1_2 * (crypto_int64) f2;
+	crypto_int64 f1f3_4 = f1_2 * (crypto_int64) f3_2;
+	crypto_int64 f1f4_2 = f1_2 * (crypto_int64) f4;
+	crypto_int64 f1f5_4 = f1_2 * (crypto_int64) f5_2;
+	crypto_int64 f1f6_2 = f1_2 * (crypto_int64) f6;
+	crypto_int64 f1f7_4 = f1_2 * (crypto_int64) f7_2;
+	crypto_int64 f1f8_2 = f1_2 * (crypto_int64) f8;
+	crypto_int64 f1f9_76 = f1_2 * (crypto_int64) f9_38;
+	crypto_int64 f2f2 = f2 * (crypto_int64) f2;
+	crypto_int64 f2f3_2 = f2_2 * (crypto_int64) f3;
+	crypto_int64 f2f4_2 = f2_2 * (crypto_int64) f4;
+	crypto_int64 f2f5_2 = f2_2 * (crypto_int64) f5;
+	crypto_int64 f2f6_2 = f2_2 * (crypto_int64) f6;
+	crypto_int64 f2f7_2 = f2_2 * (crypto_int64) f7;
+	crypto_int64 f2f8_38 = f2_2 * (crypto_int64) f8_19;
+	crypto_int64 f2f9_38 = f2 * (crypto_int64) f9_38;
+	crypto_int64 f3f3_2 = f3_2 * (crypto_int64) f3;
+	crypto_int64 f3f4_2 = f3_2 * (crypto_int64) f4;
+	crypto_int64 f3f5_4 = f3_2 * (crypto_int64) f5_2;
+	crypto_int64 f3f6_2 = f3_2 * (crypto_int64) f6;
+	crypto_int64 f3f7_76 = f3_2 * (crypto_int64) f7_38;
+	crypto_int64 f3f8_38 = f3_2 * (crypto_int64) f8_19;
+	crypto_int64 f3f9_76 = f3_2 * (crypto_int64) f9_38;
+	crypto_int64 f4f4 = f4 * (crypto_int64) f4;
+	crypto_int64 f4f5_2 = f4_2 * (crypto_int64) f5;
+	crypto_int64 f4f6_38 = f4_2 * (crypto_int64) f6_19;
+	crypto_int64 f4f7_38 = f4 * (crypto_int64) f7_38;
+	crypto_int64 f4f8_38 = f4_2 * (crypto_int64) f8_19;
+	crypto_int64 f4f9_38 = f4 * (crypto_int64) f9_38;
+	crypto_int64 f5f5_38 = f5 * (crypto_int64) f5_38;
+	crypto_int64 f5f6_38 = f5_2 * (crypto_int64) f6_19;
+	crypto_int64 f5f7_76 = f5_2 * (crypto_int64) f7_38;
+	crypto_int64 f5f8_38 = f5_2 * (crypto_int64) f8_19;
+	crypto_int64 f5f9_76 = f5_2 * (crypto_int64) f9_38;
+	crypto_int64 f6f6_19 = f6 * (crypto_int64) f6_19;
+	crypto_int64 f6f7_38 = f6 * (crypto_int64) f7_38;
+	crypto_int64 f6f8_38 = f6_2 * (crypto_int64) f8_19;
+	crypto_int64 f6f9_38 = f6 * (crypto_int64) f9_38;
+	crypto_int64 f7f7_38 = f7 * (crypto_int64) f7_38;
+	crypto_int64 f7f8_38 = f7_2 * (crypto_int64) f8_19;
+	crypto_int64 f7f9_76 = f7_2 * (crypto_int64) f9_38;
+	crypto_int64 f8f8_19 = f8 * (crypto_int64) f8_19;
+	crypto_int64 f8f9_38 = f8 * (crypto_int64) f9_38;
+	crypto_int64 f9f9_38 = f9 * (crypto_int64) f9_38;
+	crypto_int64 h0 = f0f0 +f1f9_76+f2f8_38+f3f7_76+f4f6_38+f5f5_38;
+	crypto_int64 h1 = f0f1_2+f2f9_38+f3f8_38+f4f7_38+f5f6_38;
+	crypto_int64 h2 = f0f2_2+f1f1_2 +f3f9_76+f4f8_38+f5f7_76+f6f6_19;
+	crypto_int64 h3 = f0f3_2+f1f2_2 +f4f9_38+f5f8_38+f6f7_38;
+	crypto_int64 h4 = f0f4_2+f1f3_4 +f2f2 +f5f9_76+f6f8_38+f7f7_38;
+	crypto_int64 h5 = f0f5_2+f1f4_2 +f2f3_2 +f6f9_38+f7f8_38;
+	crypto_int64 h6 = f0f6_2+f1f5_4 +f2f4_2 +f3f3_2 +f7f9_76+f8f8_19;
+	crypto_int64 h7 = f0f7_2+f1f6_2 +f2f5_2 +f3f4_2 +f8f9_38;
+	crypto_int64 h8 = f0f8_2+f1f7_4 +f2f6_2 +f3f5_4 +f4f4 +f9f9_38;
+	crypto_int64 h9 = f0f9_2+f1f8_2 +f2f7_2 +f3f6_2 +f4f5_2;
+	crypto_int64 carry0;
+	crypto_int64 carry1;
+	crypto_int64 carry2;
+	crypto_int64 carry3;
+	crypto_int64 carry4;
+	crypto_int64 carry5;
+	crypto_int64 carry6;
+	crypto_int64 carry7;
+	crypto_int64 carry8;
+	crypto_int64 carry9;
+
+	/* the factor of 2 that distinguishes fe_sq2 from fe_sq */
+	h0 += h0;
+	h1 += h1;
+	h2 += h2;
+	h3 += h3;
+	h4 += h4;
+	h5 += h5;
+	h6 += h6;
+	h7 += h7;
+	h8 += h8;
+	h9 += h9;
+
+	carry0 = (h0 + (crypto_int64) (1<<25)) >> 26; h1 += carry0; h0 -= carry0 << 26;
+	carry4 = (h4 + (crypto_int64) (1<<25)) >> 26; h5 += carry4; h4 -= carry4 << 26;
+
+	carry1 = (h1 + (crypto_int64) (1<<24)) >> 25; h2 += carry1; h1 -= carry1 << 25;
+	carry5 = (h5 + (crypto_int64) (1<<24)) >> 25; h6 += carry5; h5 -= carry5 << 25;
+
+	carry2 = (h2 + (crypto_int64) (1<<25)) >> 26; h3 += carry2; h2 -= carry2 << 26;
+	carry6 = (h6 + (crypto_int64) (1<<25)) >> 26; h7 += carry6; h6 -= carry6 << 26;
+
+	carry3 = (h3 + (crypto_int64) (1<<24)) >> 25; h4 += carry3; h3 -= carry3 << 25;
+	carry7 = (h7 + (crypto_int64) (1<<24)) >> 25; h8 += carry7; h7 -= carry7 << 25;
+
+	carry4 = (h4 + (crypto_int64) (1<<25)) >> 26; h5 += carry4; h4 -= carry4 << 26;
+	carry8 = (h8 + (crypto_int64) (1<<25)) >> 26; h9 += carry8; h8 -= carry8 << 26;
+
+	carry9 = (h9 + (crypto_int64) (1<<24)) >> 25; h0 += carry9 * 19; h9 -= carry9 << 25;
+
+	carry0 = (h0 + (crypto_int64) (1<<25)) >> 26; h1 += carry0; h0 -= carry0 << 26;
+
+	h[0] = h0;
+	h[1] = h1;
+	h[2] = h2;
+	h[3] = h3;
+	h[4] = h4;
+	h[5] = h5;
+	h[6] = h6;
+	h[7] = h7;
+	h[8] = h8;
+	h[9] = h9;
+}
+
+/*
+h = f * g
+Can overlap h with f or g.
+
+Preconditions:
+ |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc.
+ |g| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc.
+
+Postconditions:
+ |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc.
+*/
+
+/*
+Notes on implementation strategy:
+
+Using schoolbook multiplication.
+Karatsuba would save a little in some cost models.
+
+Most multiplications by 2 and 19 are 32-bit precomputations;
+cheaper than 64-bit postcomputations.
+
+There is one remaining multiplication by 19 in the carry chain;
+one *19 precomputation can be merged into this,
+but the resulting data flow is considerably less clean.
+
+There are 12 carries below.
+10 of them are 2-way parallelizable and vectorizable.
+Can get away with 11 carries, but then data flow is much deeper.
+
+With tighter constraints on inputs can squeeze carries into int32.
+*/
+
+/* Field multiplication h = f*g over GF(2^255-19) in the 10-limb
+   radix-2^25.5 representation (see the pre/postcondition comment above). */
+static void fe_mul(fe h,const fe f,const fe g)
+{
+ crypto_int32 f0 = f[0];
+ crypto_int32 f1 = f[1];
+ crypto_int32 f2 = f[2];
+ crypto_int32 f3 = f[3];
+ crypto_int32 f4 = f[4];
+ crypto_int32 f5 = f[5];
+ crypto_int32 f6 = f[6];
+ crypto_int32 f7 = f[7];
+ crypto_int32 f8 = f[8];
+ crypto_int32 f9 = f[9];
+ crypto_int32 g0 = g[0];
+ crypto_int32 g1 = g[1];
+ crypto_int32 g2 = g[2];
+ crypto_int32 g3 = g[3];
+ crypto_int32 g4 = g[4];
+ crypto_int32 g5 = g[5];
+ crypto_int32 g6 = g[6];
+ crypto_int32 g7 = g[7];
+ crypto_int32 g8 = g[8];
+ crypto_int32 g9 = g[9];
+ /* 32-bit precomputations of 19*g_i and 2*f_i (see strategy note above):
+    the *19 factors fold the reduction 2^255 = 19 (mod p) into the products. */
+ crypto_int32 g1_19 = 19 * g1; /* 1.959375*2^29 */
+ crypto_int32 g2_19 = 19 * g2; /* 1.959375*2^30; still ok */
+ crypto_int32 g3_19 = 19 * g3;
+ crypto_int32 g4_19 = 19 * g4;
+ crypto_int32 g5_19 = 19 * g5;
+ crypto_int32 g6_19 = 19 * g6;
+ crypto_int32 g7_19 = 19 * g7;
+ crypto_int32 g8_19 = 19 * g8;
+ crypto_int32 g9_19 = 19 * g9;
+ crypto_int32 f1_2 = 2 * f1;
+ crypto_int32 f3_2 = 2 * f3;
+ crypto_int32 f5_2 = 2 * f5;
+ crypto_int32 f7_2 = 2 * f7;
+ crypto_int32 f9_2 = 2 * f9;
+ /* Schoolbook partial products; suffixes _2/_19/_38 record which
+    precomputed factors are already included. */
+ crypto_int64 f0g0 = f0 * (crypto_int64) g0;
+ crypto_int64 f0g1 = f0 * (crypto_int64) g1;
+ crypto_int64 f0g2 = f0 * (crypto_int64) g2;
+ crypto_int64 f0g3 = f0 * (crypto_int64) g3;
+ crypto_int64 f0g4 = f0 * (crypto_int64) g4;
+ crypto_int64 f0g5 = f0 * (crypto_int64) g5;
+ crypto_int64 f0g6 = f0 * (crypto_int64) g6;
+ crypto_int64 f0g7 = f0 * (crypto_int64) g7;
+ crypto_int64 f0g8 = f0 * (crypto_int64) g8;
+ crypto_int64 f0g9 = f0 * (crypto_int64) g9;
+ crypto_int64 f1g0 = f1 * (crypto_int64) g0;
+ crypto_int64 f1g1_2 = f1_2 * (crypto_int64) g1;
+ crypto_int64 f1g2 = f1 * (crypto_int64) g2;
+ crypto_int64 f1g3_2 = f1_2 * (crypto_int64) g3;
+ crypto_int64 f1g4 = f1 * (crypto_int64) g4;
+ crypto_int64 f1g5_2 = f1_2 * (crypto_int64) g5;
+ crypto_int64 f1g6 = f1 * (crypto_int64) g6;
+ crypto_int64 f1g7_2 = f1_2 * (crypto_int64) g7;
+ crypto_int64 f1g8 = f1 * (crypto_int64) g8;
+ crypto_int64 f1g9_38 = f1_2 * (crypto_int64) g9_19;
+ crypto_int64 f2g0 = f2 * (crypto_int64) g0;
+ crypto_int64 f2g1 = f2 * (crypto_int64) g1;
+ crypto_int64 f2g2 = f2 * (crypto_int64) g2;
+ crypto_int64 f2g3 = f2 * (crypto_int64) g3;
+ crypto_int64 f2g4 = f2 * (crypto_int64) g4;
+ crypto_int64 f2g5 = f2 * (crypto_int64) g5;
+ crypto_int64 f2g6 = f2 * (crypto_int64) g6;
+ crypto_int64 f2g7 = f2 * (crypto_int64) g7;
+ crypto_int64 f2g8_19 = f2 * (crypto_int64) g8_19;
+ crypto_int64 f2g9_19 = f2 * (crypto_int64) g9_19;
+ crypto_int64 f3g0 = f3 * (crypto_int64) g0;
+ crypto_int64 f3g1_2 = f3_2 * (crypto_int64) g1;
+ crypto_int64 f3g2 = f3 * (crypto_int64) g2;
+ crypto_int64 f3g3_2 = f3_2 * (crypto_int64) g3;
+ crypto_int64 f3g4 = f3 * (crypto_int64) g4;
+ crypto_int64 f3g5_2 = f3_2 * (crypto_int64) g5;
+ crypto_int64 f3g6 = f3 * (crypto_int64) g6;
+ crypto_int64 f3g7_38 = f3_2 * (crypto_int64) g7_19;
+ crypto_int64 f3g8_19 = f3 * (crypto_int64) g8_19;
+ crypto_int64 f3g9_38 = f3_2 * (crypto_int64) g9_19;
+ crypto_int64 f4g0 = f4 * (crypto_int64) g0;
+ crypto_int64 f4g1 = f4 * (crypto_int64) g1;
+ crypto_int64 f4g2 = f4 * (crypto_int64) g2;
+ crypto_int64 f4g3 = f4 * (crypto_int64) g3;
+ crypto_int64 f4g4 = f4 * (crypto_int64) g4;
+ crypto_int64 f4g5 = f4 * (crypto_int64) g5;
+ crypto_int64 f4g6_19 = f4 * (crypto_int64) g6_19;
+ crypto_int64 f4g7_19 = f4 * (crypto_int64) g7_19;
+ crypto_int64 f4g8_19 = f4 * (crypto_int64) g8_19;
+ crypto_int64 f4g9_19 = f4 * (crypto_int64) g9_19;
+ crypto_int64 f5g0 = f5 * (crypto_int64) g0;
+ crypto_int64 f5g1_2 = f5_2 * (crypto_int64) g1;
+ crypto_int64 f5g2 = f5 * (crypto_int64) g2;
+ crypto_int64 f5g3_2 = f5_2 * (crypto_int64) g3;
+ crypto_int64 f5g4 = f5 * (crypto_int64) g4;
+ crypto_int64 f5g5_38 = f5_2 * (crypto_int64) g5_19;
+ crypto_int64 f5g6_19 = f5 * (crypto_int64) g6_19;
+ crypto_int64 f5g7_38 = f5_2 * (crypto_int64) g7_19;
+ crypto_int64 f5g8_19 = f5 * (crypto_int64) g8_19;
+ crypto_int64 f5g9_38 = f5_2 * (crypto_int64) g9_19;
+ crypto_int64 f6g0 = f6 * (crypto_int64) g0;
+ crypto_int64 f6g1 = f6 * (crypto_int64) g1;
+ crypto_int64 f6g2 = f6 * (crypto_int64) g2;
+ crypto_int64 f6g3 = f6 * (crypto_int64) g3;
+ crypto_int64 f6g4_19 = f6 * (crypto_int64) g4_19;
+ crypto_int64 f6g5_19 = f6 * (crypto_int64) g5_19;
+ crypto_int64 f6g6_19 = f6 * (crypto_int64) g6_19;
+ crypto_int64 f6g7_19 = f6 * (crypto_int64) g7_19;
+ crypto_int64 f6g8_19 = f6 * (crypto_int64) g8_19;
+ crypto_int64 f6g9_19 = f6 * (crypto_int64) g9_19;
+ crypto_int64 f7g0 = f7 * (crypto_int64) g0;
+ crypto_int64 f7g1_2 = f7_2 * (crypto_int64) g1;
+ crypto_int64 f7g2 = f7 * (crypto_int64) g2;
+ crypto_int64 f7g3_38 = f7_2 * (crypto_int64) g3_19;
+ crypto_int64 f7g4_19 = f7 * (crypto_int64) g4_19;
+ crypto_int64 f7g5_38 = f7_2 * (crypto_int64) g5_19;
+ crypto_int64 f7g6_19 = f7 * (crypto_int64) g6_19;
+ crypto_int64 f7g7_38 = f7_2 * (crypto_int64) g7_19;
+ crypto_int64 f7g8_19 = f7 * (crypto_int64) g8_19;
+ crypto_int64 f7g9_38 = f7_2 * (crypto_int64) g9_19;
+ crypto_int64 f8g0 = f8 * (crypto_int64) g0;
+ crypto_int64 f8g1 = f8 * (crypto_int64) g1;
+ crypto_int64 f8g2_19 = f8 * (crypto_int64) g2_19;
+ crypto_int64 f8g3_19 = f8 * (crypto_int64) g3_19;
+ crypto_int64 f8g4_19 = f8 * (crypto_int64) g4_19;
+ crypto_int64 f8g5_19 = f8 * (crypto_int64) g5_19;
+ crypto_int64 f8g6_19 = f8 * (crypto_int64) g6_19;
+ crypto_int64 f8g7_19 = f8 * (crypto_int64) g7_19;
+ crypto_int64 f8g8_19 = f8 * (crypto_int64) g8_19;
+ crypto_int64 f8g9_19 = f8 * (crypto_int64) g9_19;
+ crypto_int64 f9g0 = f9 * (crypto_int64) g0;
+ crypto_int64 f9g1_38 = f9_2 * (crypto_int64) g1_19;
+ crypto_int64 f9g2_19 = f9 * (crypto_int64) g2_19;
+ crypto_int64 f9g3_38 = f9_2 * (crypto_int64) g3_19;
+ crypto_int64 f9g4_19 = f9 * (crypto_int64) g4_19;
+ crypto_int64 f9g5_38 = f9_2 * (crypto_int64) g5_19;
+ crypto_int64 f9g6_19 = f9 * (crypto_int64) g6_19;
+ crypto_int64 f9g7_38 = f9_2 * (crypto_int64) g7_19;
+ crypto_int64 f9g8_19 = f9 * (crypto_int64) g8_19;
+ crypto_int64 f9g9_38 = f9_2 * (crypto_int64) g9_19;
+ /* Each limb of h collects the partial products of matching weight. */
+ crypto_int64 h0 = f0g0+f1g9_38+f2g8_19+f3g7_38+f4g6_19+f5g5_38+f6g4_19+f7g3_38+f8g2_19+f9g1_38;
+ crypto_int64 h1 = f0g1+f1g0 +f2g9_19+f3g8_19+f4g7_19+f5g6_19+f6g5_19+f7g4_19+f8g3_19+f9g2_19;
+ crypto_int64 h2 = f0g2+f1g1_2 +f2g0 +f3g9_38+f4g8_19+f5g7_38+f6g6_19+f7g5_38+f8g4_19+f9g3_38;
+ crypto_int64 h3 = f0g3+f1g2 +f2g1 +f3g0 +f4g9_19+f5g8_19+f6g7_19+f7g6_19+f8g5_19+f9g4_19;
+ crypto_int64 h4 = f0g4+f1g3_2 +f2g2 +f3g1_2 +f4g0 +f5g9_38+f6g8_19+f7g7_38+f8g6_19+f9g5_38;
+ crypto_int64 h5 = f0g5+f1g4 +f2g3 +f3g2 +f4g1 +f5g0 +f6g9_19+f7g8_19+f8g7_19+f9g6_19;
+ crypto_int64 h6 = f0g6+f1g5_2 +f2g4 +f3g3_2 +f4g2 +f5g1_2 +f6g0 +f7g9_38+f8g8_19+f9g7_38;
+ crypto_int64 h7 = f0g7+f1g6 +f2g5 +f3g4 +f4g3 +f5g2 +f6g1 +f7g0 +f8g9_19+f9g8_19;
+ crypto_int64 h8 = f0g8+f1g7_2 +f2g6 +f3g5_2 +f4g4 +f5g3_2 +f6g2 +f7g1_2 +f8g0 +f9g9_38;
+ crypto_int64 h9 = f0g9+f1g8 +f2g7 +f3g6 +f4g5 +f5g4 +f6g3 +f7g2 +f8g1 +f9g0 ;
+ crypto_int64 carry0;
+ crypto_int64 carry1;
+ crypto_int64 carry2;
+ crypto_int64 carry3;
+ crypto_int64 carry4;
+ crypto_int64 carry5;
+ crypto_int64 carry6;
+ crypto_int64 carry7;
+ crypto_int64 carry8;
+ crypto_int64 carry9;
+
+ /*
+ |h0| <= (1.65*1.65*2^52*(1+19+19+19+19)+1.65*1.65*2^50*(38+38+38+38+38))
+ i.e. |h0| <= 1.4*2^60; narrower ranges for h2, h4, h6, h8
+ |h1| <= (1.65*1.65*2^51*(1+1+19+19+19+19+19+19+19+19))
+ i.e. |h1| <= 1.7*2^59; narrower ranges for h3, h5, h7, h9
+ */
+
+ /* Branch-free carry chain: rounding add of 2^25 (or 2^24), arithmetic
+    shift, then subtract — identical operation sequence for all inputs. */
+ carry0 = (h0 + (crypto_int64) (1<<25)) >> 26; h1 += carry0; h0 -= carry0 << 26;
+ carry4 = (h4 + (crypto_int64) (1<<25)) >> 26; h5 += carry4; h4 -= carry4 << 26;
+ /* |h0| <= 2^25 */
+ /* |h4| <= 2^25 */
+ /* |h1| <= 1.71*2^59 */
+ /* |h5| <= 1.71*2^59 */
+
+ carry1 = (h1 + (crypto_int64) (1<<24)) >> 25; h2 += carry1; h1 -= carry1 << 25;
+ carry5 = (h5 + (crypto_int64) (1<<24)) >> 25; h6 += carry5; h5 -= carry5 << 25;
+ /* |h1| <= 2^24; from now on fits into int32 */
+ /* |h5| <= 2^24; from now on fits into int32 */
+ /* |h2| <= 1.41*2^60 */
+ /* |h6| <= 1.41*2^60 */
+
+ carry2 = (h2 + (crypto_int64) (1<<25)) >> 26; h3 += carry2; h2 -= carry2 << 26;
+ carry6 = (h6 + (crypto_int64) (1<<25)) >> 26; h7 += carry6; h6 -= carry6 << 26;
+ /* |h2| <= 2^25; from now on fits into int32 unchanged */
+ /* |h6| <= 2^25; from now on fits into int32 unchanged */
+ /* |h3| <= 1.71*2^59 */
+ /* |h7| <= 1.71*2^59 */
+
+ carry3 = (h3 + (crypto_int64) (1<<24)) >> 25; h4 += carry3; h3 -= carry3 << 25;
+ carry7 = (h7 + (crypto_int64) (1<<24)) >> 25; h8 += carry7; h7 -= carry7 << 25;
+ /* |h3| <= 2^24; from now on fits into int32 unchanged */
+ /* |h7| <= 2^24; from now on fits into int32 unchanged */
+ /* |h4| <= 1.72*2^34 */
+ /* |h8| <= 1.41*2^60 */
+
+ carry4 = (h4 + (crypto_int64) (1<<25)) >> 26; h5 += carry4; h4 -= carry4 << 26;
+ carry8 = (h8 + (crypto_int64) (1<<25)) >> 26; h9 += carry8; h8 -= carry8 << 26;
+ /* |h4| <= 2^25; from now on fits into int32 unchanged */
+ /* |h8| <= 2^25; from now on fits into int32 unchanged */
+ /* |h5| <= 1.01*2^24 */
+ /* |h9| <= 1.71*2^59 */
+
+ /* The top carry wraps around into h0 multiplied by 19 (2^255 = 19 mod p). */
+ carry9 = (h9 + (crypto_int64) (1<<24)) >> 25; h0 += carry9 * 19; h9 -= carry9 << 25;
+ /* |h9| <= 2^24; from now on fits into int32 unchanged */
+ /* |h0| <= 1.1*2^39 */
+
+ carry0 = (h0 + (crypto_int64) (1<<25)) >> 26; h1 += carry0; h0 -= carry0 << 26;
+ /* |h0| <= 2^25; from now on fits into int32 unchanged */
+ /* |h1| <= 1.01*2^24 */
+
+ h[0] = h0;
+ h[1] = h1;
+ h[2] = h2;
+ h[3] = h3;
+ h[4] = h4;
+ h[5] = h5;
+ h[6] = h6;
+ h[7] = h7;
+ h[8] = h8;
+ h[9] = h9;
+}
+
+
+/*
+return 1 if f is in {1,3,5,...,q-2}
+return 0 if f is in {0,2,4,...,q-1}
+
+Preconditions:
+ |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+*/
+
+static int fe_isnegative(const fe f)
+{
+ /* "Negative" means odd in the canonical encoding: serialize f to its
+    unique 32-byte form and report the low bit of the first byte. */
+ unsigned char packed[32];
+ fe_tobytes(packed,f);
+ return packed[0] & 1;
+}
+
+
+/*
+return 0 if f == 0
+return nonzero (the -1 produced by crypto_verify_32) if f != 0
+
+Preconditions:
+ |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+*/
+
+static const unsigned char zero[32] = {0};
+
+static int fe_isnonzero(const fe f)
+{
+ /* Constant-time zero test: serialize to the canonical 32-byte form and
+    compare against all-zero bytes.  The result is 0 iff f represents 0. */
+ unsigned char packed[32];
+ fe_tobytes(packed,f);
+ return crypto_verify_32(packed,zero);
+}
+
+/*
+h = -f
+
+Preconditions:
+ |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+
+Postconditions:
+ |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+*/
+
+static void fe_neg(fe h,const fe f)
+{
+ /* Negate limb by limb; safe when h aliases f, since each limb is read
+    before its (sole) corresponding write. */
+ int i;
+ for (i = 0;i < 10;++i)
+   h[i] = -f[i];
+}
+
+/* NOTE(review): the qhasm-annotated bodies of fe_invert, fe_pow22523 and
+   ge_p2_0 in this hunk were corrupted (text between '<' and '>' was
+   stripped, deleting the fe_mul steps and the function boundaries).
+   Restored below from the canonical public-domain ref10 addition chains;
+   the surviving fe_sq lines match these chains exactly. */
+
+/*
+out = z^-1 = z^(p-2) = z^(2^255-21)  (Fermat inversion; constant sequence
+of squarings and multiplications, so constant time in z).
+In the comments, z_a_b denotes z^(2^a - 2^b).
+*/
+
+static void fe_invert(fe out,const fe z)
+{
+ fe t0;
+ fe t1;
+ fe t2;
+ fe t3;
+ int i;
+
+ fe_sq(t0,z);                                        /* t0 = z^2 */
+ fe_sq(t1,t0); for (i = 1;i < 2;++i) fe_sq(t1,t1);   /* t1 = z^8 */
+ fe_mul(t1,z,t1);                                    /* t1 = z^9 */
+ fe_mul(t0,t0,t1);                                   /* t0 = z^11 */
+ fe_sq(t2,t0);                                       /* t2 = z^22 */
+ fe_mul(t1,t1,t2);                                   /* t1 = z_5_0 = z^31 */
+ fe_sq(t2,t1); for (i = 1;i < 5;++i) fe_sq(t2,t2);   /* t2 = z_10_5 */
+ fe_mul(t1,t2,t1);                                   /* t1 = z_10_0 */
+ fe_sq(t2,t1); for (i = 1;i < 10;++i) fe_sq(t2,t2);  /* t2 = z_20_10 */
+ fe_mul(t2,t2,t1);                                   /* t2 = z_20_0 */
+ fe_sq(t3,t2); for (i = 1;i < 20;++i) fe_sq(t3,t3);  /* t3 = z_40_20 */
+ fe_mul(t2,t3,t2);                                   /* t2 = z_40_0 */
+ fe_sq(t2,t2); for (i = 1;i < 10;++i) fe_sq(t2,t2);  /* t2 = z_50_10 */
+ fe_mul(t1,t2,t1);                                   /* t1 = z_50_0 */
+ fe_sq(t2,t1); for (i = 1;i < 50;++i) fe_sq(t2,t2);  /* t2 = z_100_50 */
+ fe_mul(t2,t2,t1);                                   /* t2 = z_100_0 */
+ fe_sq(t3,t2); for (i = 1;i < 100;++i) fe_sq(t3,t3); /* t3 = z_200_100 */
+ fe_mul(t2,t3,t2);                                   /* t2 = z_200_0 */
+ fe_sq(t2,t2); for (i = 1;i < 50;++i) fe_sq(t2,t2);  /* t2 = z_250_50 */
+ fe_mul(t1,t2,t1);                                   /* t1 = z_250_0 */
+ fe_sq(t1,t1); for (i = 1;i < 5;++i) fe_sq(t1,t1);   /* t1 = z_255_5 */
+ fe_mul(out,t1,t0);                                  /* out = z^(2^255-21) */
+}
+
+/*
+out = z^((p-5)/8) = z^(2^252-3)
+(used when computing square roots for point decompression)
+*/
+
+static void fe_pow22523(fe out,const fe z)
+{
+ fe t0;
+ fe t1;
+ fe t2;
+ int i;
+
+ fe_sq(t0,z);                                        /* t0 = z^2 */
+ fe_sq(t1,t0); for (i = 1;i < 2;++i) fe_sq(t1,t1);   /* t1 = z^8 */
+ fe_mul(t1,z,t1);                                    /* t1 = z^9 */
+ fe_mul(t0,t0,t1);                                   /* t0 = z^11 */
+ fe_sq(t0,t0);                                       /* t0 = z^22 */
+ fe_mul(t0,t1,t0);                                   /* t0 = z_5_0 */
+ fe_sq(t1,t0); for (i = 1;i < 5;++i) fe_sq(t1,t1);   /* t1 = z_10_5 */
+ fe_mul(t0,t1,t0);                                   /* t0 = z_10_0 */
+ fe_sq(t1,t0); for (i = 1;i < 10;++i) fe_sq(t1,t1);  /* t1 = z_20_10 */
+ fe_mul(t1,t1,t0);                                   /* t1 = z_20_0 */
+ fe_sq(t2,t1); for (i = 1;i < 20;++i) fe_sq(t2,t2);  /* t2 = z_40_20 */
+ fe_mul(t1,t2,t1);                                   /* t1 = z_40_0 */
+ fe_sq(t1,t1); for (i = 1;i < 10;++i) fe_sq(t1,t1);  /* t1 = z_50_10 */
+ fe_mul(t0,t1,t0);                                   /* t0 = z_50_0 */
+ fe_sq(t1,t0); for (i = 1;i < 50;++i) fe_sq(t1,t1);  /* t1 = z_100_50 */
+ fe_mul(t1,t1,t0);                                   /* t1 = z_100_0 */
+ fe_sq(t2,t1); for (i = 1;i < 100;++i) fe_sq(t2,t2); /* t2 = z_200_100 */
+ fe_mul(t1,t2,t1);                                   /* t1 = z_200_0 */
+ fe_sq(t1,t1); for (i = 1;i < 50;++i) fe_sq(t1,t1);  /* t1 = z_250_50 */
+ fe_mul(t0,t1,t0);                                   /* t0 = z_250_0 */
+ fe_sq(t0,t0); for (i = 1;i < 2;++i) fe_sq(t0,t0);   /* t0 = z_252_2 */
+ fe_mul(out,t0,z);                                   /* out = z^(2^252-3) */
+}
+
+/*
+h = neutral element (0 : 1 : 1) in projective (P2) coordinates
+*/
+
+static void ge_p2_0(ge_p2 *h)
+{
+ fe_0(h->X);
+ fe_1(h->Y);
+ fe_1(h->Z);
+}
+
+static void ge_p3_0(ge_p3 *h)
+{
+ /* Neutral element in extended coordinates: (X:Y:Z:T) = (0:1:1:0). */
+ fe_1(h->Y);
+ fe_1(h->Z);
+ fe_0(h->X);
+ fe_0(h->T);
+}
+
+static void ge_precomp_0(ge_precomp *h)
+{
+ /* Precomputed form of the neutral element: y+x = 1, y-x = 1, xy2d = 0. */
+ fe_0(h->xy2d);
+ fe_1(h->yplusx);
+ fe_1(h->yminusx);
+}
+
+/*
+r = p
+*/
+
+/* Convert from the completed (P1P1) representation to projective (P2):
+   X = X*T, Y = Y*Z, Z = Z*T. */
+static void ge_p1p1_to_p2(ge_p2 *r,const ge_p1p1 *p)
+{
+ fe_mul(r->X,p->X,p->T);
+ fe_mul(r->Y,p->Y,p->Z);
+ fe_mul(r->Z,p->Z,p->T);
+}
+
+/*
+r = p
+*/
+
+/* Convert from the completed (P1P1) representation to extended (P3):
+   X = X*T, Y = Y*Z, Z = Z*T, T = X*Y.
+   NOTE(review): r->T is formed from p->X and p->Y after r->X and r->Y are
+   written, so r must not alias p -- confirm at call sites. */
+static void ge_p1p1_to_p3(ge_p3 *r,const ge_p1p1 *p)
+{
+ fe_mul(r->X,p->X,p->T);
+ fe_mul(r->Y,p->Y,p->Z);
+ fe_mul(r->Z,p->Z,p->T);
+ fe_mul(r->T,p->X,p->Y);
+}
+
+/*
+r = p
+*/
+
+static void ge_p3_to_p2(ge_p2 *r,const ge_p3 *p)
+{
+ /* Drop the extended coordinate T; X, Y, Z carry over unchanged. */
+ fe_copy(r->Z,p->Z);
+ fe_copy(r->Y,p->Y);
+ fe_copy(r->X,p->X);
+}
+
+
+/*
+r = 2 * p
+*/
+
+/* Point doubling, P2 input -> completed (P1P1) output.
+   Translated from qhasm; the statement order matters because r's fields
+   are reused as scratch space while p is still being read.
+   (The original interleaved asm annotations were corrupted during import
+   and have been replaced by the plain qhasm step labels.) */
+static void ge_p2_dbl(ge_p1p1 *r,const ge_p2 *p)
+{
+ fe t0;
+
+/* XX = X1^2 */
+fe_sq(r->X,p->X);
+
+/* YY = Y1^2 */
+fe_sq(r->Z,p->Y);
+
+/* B = 2*Z1^2 */
+fe_sq2(r->T,p->Z);
+
+/* A = X1+Y1 */
+fe_add(r->Y,p->X,p->Y);
+
+/* AA = A^2 */
+fe_sq(t0,r->Y);
+
+/* Y3 = YY+XX */
+fe_add(r->Y,r->Z,r->X);
+
+/* Z3 = YY-XX */
+fe_sub(r->Z,r->Z,r->X);
+
+/* X3 = AA-Y3 */
+fe_sub(r->X,t0,r->Y);
+
+/* T3 = B-Z3 */
+fe_sub(r->T,r->T,r->Z);
+}
+
+
+/*
+r = 2 * p
+*/
+
+static void ge_p3_dbl(ge_p1p1 *r,const ge_p3 *p)
+{
+ /* Doubling does not need the extended coordinate T, so project down to
+    P2 and reuse the P2 doubling formula. */
+ ge_p2 tmp;
+ ge_p3_to_p2(&tmp,p);
+ ge_p2_dbl(r,&tmp);
+}
+
+
+/*
+r = p
+*/
+
+/* Precompute the combinations ge_add/ge_sub consume:
+   YplusX = Y+X, YminusX = Y-X, Z unchanged, T2d = T*d2
+   (d2 is a file-level constant defined elsewhere in this file;
+   presumably 2*d of the curve equation -- confirm against its definition). */
+static void ge_p3_to_cached(ge_cached *r,const ge_p3 *p)
+{
+ fe_add(r->YplusX,p->Y,p->X);
+ fe_sub(r->YminusX,p->Y,p->X);
+ fe_copy(r->Z,p->Z);
+ fe_mul(r->T2d,p->T,d2);
+}
+
+/*
+r = p + q
+*/
+
+/* r = p + q, with q in cached form; result in completed (P1P1) form.
+   Translated from qhasm; r's fields double as scratch registers, so the
+   statement order below is significant.
+   (The original interleaved asm annotations were corrupted during import
+   and have been replaced by the plain qhasm step labels.) */
+static void ge_add(ge_p1p1 *r,const ge_p3 *p,const ge_cached *q)
+{
+ fe t0;
+
+/* YpX1 = Y1+X1 */
+fe_add(r->X,p->Y,p->X);
+
+/* YmX1 = Y1-X1 */
+fe_sub(r->Y,p->Y,p->X);
+
+/* A = YpX1*YpX2 */
+fe_mul(r->Z,r->X,q->YplusX);
+
+/* B = YmX1*YmX2 */
+fe_mul(r->Y,r->Y,q->YminusX);
+
+/* C = T2d2*T1 */
+fe_mul(r->T,q->T2d,p->T);
+
+/* ZZ = Z1*Z2 */
+fe_mul(r->X,p->Z,q->Z);
+
+/* D = 2*ZZ */
+fe_add(t0,r->X,r->X);
+
+/* X3 = A-B */
+fe_sub(r->X,r->Z,r->Y);
+
+/* Y3 = A+B */
+fe_add(r->Y,r->Z,r->Y);
+
+/* Z3 = D+C */
+fe_add(r->Z,t0,r->T);
+
+/* T3 = D-C */
+fe_sub(r->T,t0,r->T);
+}
+
+
+/*
+r = p - q
+*/
+
+static void ge_sub(ge_p1p1 *r,const ge_p3 *p,const ge_cached *q)
+{
+ fe t0;
+/* qhasm: enter ge_sub */
+
+/* qhasm: fe X1 */
+
+/* qhasm: fe Y1 */
+
+/* qhasm: fe Z1 */
+
+/* qhasm: fe Z2 */
+
+/* qhasm: fe T1 */
+
+/* qhasm: fe ZZ */
+
+/* qhasm: fe YpX2 */
+
+/* qhasm: fe YmX2 */
+
+/* qhasm: fe T2d2 */
+
+/* qhasm: fe X3 */
+
+/* qhasm: fe Y3 */
+
+/* qhasm: fe Z3 */
+
+/* qhasm: fe T3 */
+
+/* qhasm: fe YpX1 */
+
+/* qhasm: fe YmX1 */
+
+/* qhasm: fe A */
+
+/* qhasm: fe B */
+
+/* qhasm: fe C */
+
+/* qhasm: fe D */
+
+/* qhasm: YpX1 = Y1+X1 */
+/* asm 1: fe_add(>YpX1=fe#1,YpX1=r->X,Y,X); */
+fe_add(r->X,p->Y,p->X);
+
+/* qhasm: YmX1 = Y1-X1 */
+/* asm 1: fe_sub(>YmX1=fe#2,YmX1=r->Y,Y,X); */
+fe_sub(r->Y,p->Y,p->X);
+
+/* qhasm: A = YpX1*YmX2 */
+/* asm 1: fe_mul(>A=fe#3,A=r->Z,X,YminusX); */
+fe_mul(r->Z,r->X,q->YminusX);
+
+/* qhasm: B = YmX1*YpX2 */
+/* asm 1: fe_mul(>B=fe#2,B=r->Y,Y,YplusX); */
+fe_mul(r->Y,r->Y,q->YplusX);
+
+/* qhasm: C = T2d2*T1 */
+/* asm 1: fe_mul(>C=fe#4,