// -*- mode: rust; -*-
//
// This file is part of curve25519-dalek.
// Copyright (c) 2016-2021 isis lovecruft
// Copyright (c) 2016-2019 Henry de Valence
// Portions Copyright 2017 Brian Smith
// See LICENSE for licensing information.
//
// Authors:
// - Isis Agora Lovecruft <isis@patternsinthevoid.net>
// - Henry de Valence <hdevalence@hdevalence.ca>
// - Brian Smith <brian@briansmith.org>

//! Arithmetic on scalars (integers mod the group order).
//!
//! Both the Ristretto group and the Ed25519 basepoint have prime order
//! \\( \ell = 2\^{252} + 27742317777372353535851937790883648493 \\).
//!
//! This code is intended to be useful with both the Ristretto group
//! (where everything is done modulo \\( \ell \\)), and the X/Ed25519
//! setting, which mandates specific bit-twiddles that are not
//! well-defined modulo \\( \ell \\).
//!
//! All arithmetic on `Scalars` is done modulo \\( \ell \\).
//!
//! # Constructing a scalar
//!
//! To create a [`Scalar`](struct.Scalar.html) from a supposedly canonical encoding, use
//! [`Scalar::from_canonical_bytes`](struct.Scalar.html#method.from_canonical_bytes).
//!
//! This function does input validation, ensuring that the input bytes
//! are the canonical encoding of a `Scalar`.
//! If they are, we'll get
//! `Some(Scalar)` in return:
//!
//! ```
//! use curve25519_dalek::scalar::Scalar;
//!
//! let one_as_bytes: [u8; 32] = Scalar::ONE.to_bytes();
//! let a: Option<Scalar> = Scalar::from_canonical_bytes(one_as_bytes).into();
//!
//! assert!(a.is_some());
//! ```
//!
//! However, if we give it bytes representing a scalar larger than \\( \ell \\)
//! (in this case, \\( \ell + 2 \\)), we'll get `None` back:
//!
//! ```
//! use curve25519_dalek::scalar::Scalar;
//!
//! let l_plus_two_bytes: [u8; 32] = [
//!    0xef, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58,
//!    0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14,
//!    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
//!    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
//! ];
//! let a: Option<Scalar> = Scalar::from_canonical_bytes(l_plus_two_bytes).into();
//!
//! assert!(a.is_none());
//! ```
//!
//! Another way to create a `Scalar` is by reducing a \\(256\\)-bit integer mod
//! \\( \ell \\), for which one may use the
//! [`Scalar::from_bytes_mod_order`](struct.Scalar.html#method.from_bytes_mod_order)
//! method.  In the case of the second example above, this would reduce the
//! resultant scalar \\( \mod \ell \\), producing \\( 2 \\):
//!
//! ```
//! use curve25519_dalek::scalar::Scalar;
//!
//! let l_plus_two_bytes: [u8; 32] = [
//!    0xef, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58,
//!    0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14,
//!    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
//!    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
//! ];
//! let a: Scalar = Scalar::from_bytes_mod_order(l_plus_two_bytes);
//!
//! let two: Scalar = Scalar::ONE + Scalar::ONE;
//!
//! assert!(a == two);
//! ```
//!
//! There is also a constructor that reduces a \\(512\\)-bit integer,
//! [`Scalar::from_bytes_mod_order_wide`].
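//!
//! For example, a \\(256\\)-bit input zero-extended to \\(512\\) bits reduces
//! to the same scalar that `from_bytes_mod_order` would produce:
//!
//! ```
//! use curve25519_dalek::scalar::Scalar;
//!
//! let mut wide = [0u8; 64];
//! wide[..32].copy_from_slice(&[0xff; 32]);
//!
//! assert_eq!(
//!     Scalar::from_bytes_mod_order_wide(&wide),
//!     Scalar::from_bytes_mod_order([0xff; 32]),
//! );
//! ```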
//!
//! To construct a `Scalar` as the hash of some input data, use
//! [`Scalar::hash_from_bytes`], which takes a buffer, or
//! [`Scalar::from_hash`], which allows an IUF API.
//!
#![cfg_attr(feature = "digest", doc = "```")]
#![cfg_attr(not(feature = "digest"), doc = "```ignore")]
//! # fn main() {
//! use sha2::{Digest, Sha512};
//! use curve25519_dalek::scalar::Scalar;
//!
//! // Hashing a single byte slice
//! let a = Scalar::hash_from_bytes::<Sha512>(b"Abolish ICE");
//!
//! // Streaming data into a hash object
//! let mut hasher = Sha512::default();
//! hasher.update(b"Abolish ");
//! hasher.update(b"ICE");
//! let a2 = Scalar::from_hash(hasher);
//!
//! assert_eq!(a, a2);
//! # }
//! ```
//!
//! See also `Scalar::hash_from_bytes` and `Scalar::from_hash`, which
//! reduce a \\(512\\)-bit integer mod \\( \ell \\); both are available only
//! if the optional `digest` feature has been enabled.

use core::borrow::Borrow;
use core::fmt::Debug;
use core::iter::{Product, Sum};
use core::ops::Index;
use core::ops::Neg;
use core::ops::{Add, AddAssign};
use core::ops::{Mul, MulAssign};
use core::ops::{Sub, SubAssign};

use cfg_if::cfg_if;

#[cfg(feature = "group")]
use group::ff::{Field, FromUniformBytes, PrimeField};
#[cfg(feature = "group-bits")]
use group::ff::{FieldBits, PrimeFieldBits};

#[cfg(any(test, feature = "group"))]
use rand_core::RngCore;

#[cfg(any(test, feature = "rand_core"))]
use rand_core::CryptoRngCore;

#[cfg(feature = "digest")]
use digest::generic_array::typenum::U64;
#[cfg(feature = "digest")]
use digest::Digest;

use subtle::Choice;
use subtle::ConditionallySelectable;
use subtle::ConstantTimeEq;
use subtle::CtOption;

#[cfg(feature = "zeroize")]
use zeroize::Zeroize;

use crate::backend;
use crate::constants;

cfg_if! {
    if #[cfg(curve25519_dalek_backend = "fiat")] {
        /// An `UnpackedScalar` represents an element of the field GF(l), optimized for speed.
        ///
        /// This is a type alias for one of the scalar types in the `backend`
        /// module.
        #[cfg(curve25519_dalek_bits = "32")]
        #[cfg_attr(
            docsrs,
            doc(cfg(all(feature = "fiat_backend", curve25519_dalek_bits = "32")))
        )]
        type UnpackedScalar = backend::serial::fiat_u32::scalar::Scalar29;

        /// An `UnpackedScalar` represents an element of the field GF(l), optimized for speed.
        ///
        /// This is a type alias for one of the scalar types in the `backend`
        /// module.
        #[cfg(curve25519_dalek_bits = "64")]
        #[cfg_attr(
            docsrs,
            doc(cfg(all(feature = "fiat_backend", curve25519_dalek_bits = "64")))
        )]
        type UnpackedScalar = backend::serial::fiat_u64::scalar::Scalar52;
    } else if #[cfg(curve25519_dalek_bits = "64")] {
        /// An `UnpackedScalar` represents an element of the field GF(l), optimized for speed.
        ///
        /// This is a type alias for one of the scalar types in the `backend`
        /// module.
        #[cfg_attr(docsrs, doc(cfg(curve25519_dalek_bits = "64")))]
        type UnpackedScalar = backend::serial::u64::scalar::Scalar52;
    } else {
        /// An `UnpackedScalar` represents an element of the field GF(l), optimized for speed.
        ///
        /// This is a type alias for one of the scalar types in the `backend`
        /// module.
        #[cfg_attr(docsrs, doc(cfg(curve25519_dalek_bits = "32")))]
        type UnpackedScalar = backend::serial::u32::scalar::Scalar29;
    }
}

/// The `Scalar` struct holds an element of \\(\mathbb Z / \ell\mathbb Z \\).
#[allow(clippy::derived_hash_with_manual_eq)]
#[derive(Copy, Clone, Hash)]
pub struct Scalar {
    /// `bytes` is a little-endian byte encoding of an integer representing a scalar modulo the
    /// group order.
    ///
    /// # Invariant #1
    ///
    /// The integer representing this scalar is less than \\(2\^{255}\\). That is, the most
    /// significant bit of `bytes[31]` is 0.
    ///
    /// This is required for `EdwardsPoint` variable- and fixed-base multiplication, because most
    /// integers above 2^255 are unrepresentable in our radix-16 NAF (see [`Self::as_radix_16`]).
    /// The invariant is also required because our `MontgomeryPoint` multiplication assumes the MSB
    /// is 0 (see `MontgomeryPoint::mul`).
    ///
    /// # Invariant #2 (weak)
    ///
    /// The integer representing this scalar is less than \\( \ell \\), i.e., it represents
    /// the canonical representative of an element of \\( \mathbb Z / \ell\mathbb Z \\). This is
    /// stronger than invariant #1. It also sometimes has to be broken.
    ///
    /// This invariant is deliberately broken in the implementation of `EdwardsPoint::{mul_clamped,
    /// mul_base_clamped}`, `MontgomeryPoint::{mul_clamped, mul_base_clamped}`, and
    /// `BasepointTable::mul_base_clamped`. This is not an issue though. As mentioned above,
    /// scalar-point multiplication is defined for any choice of `bytes` that satisfies invariant
    /// #1. Since clamping guarantees invariant #1 is satisfied, these operations are well defined.
    ///
    /// Note: Scalar-point mult is the _only_ thing you can do safely with an unreduced scalar.
    /// Scalar-scalar addition and subtraction are NOT correct when using unreduced scalars.
    /// Multiplication is correct, but this is only due to a quirk of our implementation, and not
    /// guaranteed to hold in general in the future.
    ///
    /// Note: It is not possible to construct an unreduced `Scalar` from the public API unless the
    /// `legacy_compatibility` feature is enabled (thus making `Scalar::from_bits` public). Thus,
    /// for all public non-legacy uses, invariant #2 always holds.
    ///
    pub(crate) bytes: [u8; 32],
}

impl Scalar {
    /// Construct a `Scalar` by reducing a 256-bit little-endian integer
    /// modulo the group order \\( \ell \\).
    pub fn from_bytes_mod_order(bytes: [u8; 32]) -> Scalar {
        // Temporarily allow s_unreduced.bytes > 2^255 ...
        let s_unreduced = Scalar { bytes };

        // Then reduce mod the group order and return the reduced representative.
        let s = s_unreduced.reduce();
        debug_assert_eq!(0u8, s[31] >> 7);

        s
    }

    /// Construct a `Scalar` by reducing a 512-bit little-endian integer
    /// modulo the group order \\( \ell \\).
    pub fn from_bytes_mod_order_wide(input: &[u8; 64]) -> Scalar {
        UnpackedScalar::from_bytes_wide(input).pack()
    }

    /// Attempt to construct a `Scalar` from a canonical byte representation.
    ///
    /// # Return
    ///
    /// - `Some(s)`, where `s` is the `Scalar` corresponding to `bytes`,
    ///   if `bytes` is a canonical byte representation modulo the group order \\( \ell \\);
    /// - `None` if `bytes` is not a canonical byte representation.
    pub fn from_canonical_bytes(bytes: [u8; 32]) -> CtOption<Scalar> {
        let high_bit_unset = (bytes[31] >> 7).ct_eq(&0);
        let candidate = Scalar { bytes };
        CtOption::new(candidate, high_bit_unset & candidate.is_canonical())
    }

    /// Construct a `Scalar` from the low 255 bits of a 256-bit integer. This breaks the invariant
    /// that scalars are always reduced. Scalar-scalar arithmetic, i.e., addition, subtraction,
    /// multiplication, **does not work** on scalars produced from this function. You may only use
    /// the output of this function for `EdwardsPoint::mul`, `MontgomeryPoint::mul`, and
    /// `EdwardsPoint::vartime_double_scalar_mul_basepoint`. **Do not use this function** unless
    /// you absolutely have to.
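    ///
    /// A minimal sketch of the masking behavior (requires the `legacy_compatibility`
    /// feature, hence not compiled here):
    ///
    /// ```ignore
    /// let s = Scalar::from_bits([0xff; 32]);
    /// // The high bit is masked off to uphold invariant #1; all other bits are kept.
    /// assert_eq!(s.to_bytes()[31], 0x7f);
    /// ```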
    #[cfg(feature = "legacy_compatibility")]
    #[deprecated(
        since = "4.0.0",
        note = "This constructor outputs scalars with undefined scalar-scalar arithmetic. See docs."
    )]
    pub const fn from_bits(bytes: [u8; 32]) -> Scalar {
        let mut s = Scalar { bytes };
        // Ensure invariant #1 holds. That is, make s < 2^255 by masking the high bit.
        s.bytes[31] &= 0b0111_1111;

        s
    }
}

impl Debug for Scalar {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        write!(f, "Scalar{{\n\tbytes: {:?},\n}}", &self.bytes)
    }
}

impl Eq for Scalar {}
impl PartialEq for Scalar {
    fn eq(&self, other: &Self) -> bool {
        self.ct_eq(other).into()
    }
}

impl ConstantTimeEq for Scalar {
    fn ct_eq(&self, other: &Self) -> Choice {
        self.bytes.ct_eq(&other.bytes)
    }
}

impl Index<usize> for Scalar {
    type Output = u8;

    /// Index the bytes of the representative for this `Scalar`.  Mutation is not permitted.
    fn index(&self, _index: usize) -> &u8 {
        &(self.bytes[_index])
    }
}

impl<'a> MulAssign<&'a Scalar> for Scalar {
    fn mul_assign(&mut self, _rhs: &'a Scalar) {
        *self = UnpackedScalar::mul(&self.unpack(), &_rhs.unpack()).pack();
    }
}

define_mul_assign_variants!(LHS = Scalar, RHS = Scalar);

impl<'a> Mul<&'a Scalar> for &Scalar {
    type Output = Scalar;
    fn mul(self, _rhs: &'a Scalar) -> Scalar {
        UnpackedScalar::mul(&self.unpack(), &_rhs.unpack()).pack()
    }
}

define_mul_variants!(LHS = Scalar, RHS = Scalar, Output = Scalar);

impl<'a> AddAssign<&'a Scalar> for Scalar {
    fn add_assign(&mut self, _rhs: &'a Scalar) {
        *self = *self + _rhs;
    }
}

define_add_assign_variants!(LHS = Scalar, RHS = Scalar);

impl<'a> Add<&'a Scalar> for &Scalar {
    type Output = Scalar;
    #[allow(non_snake_case)]
    fn add(self, _rhs: &'a Scalar) -> Scalar {
        // The UnpackedScalar::add function produces reduced outputs if the inputs are reduced. By
        // Scalar invariant #2, this is always the case.
        UnpackedScalar::add(&self.unpack(), &_rhs.unpack()).pack()
    }
}

define_add_variants!(LHS = Scalar, RHS = Scalar, Output = Scalar);

impl<'a> SubAssign<&'a Scalar> for Scalar {
    fn sub_assign(&mut self, _rhs: &'a Scalar) {
        *self = *self - _rhs;
    }
}

define_sub_assign_variants!(LHS = Scalar, RHS = Scalar);

impl<'a> Sub<&'a Scalar> for &Scalar {
    type Output = Scalar;
    #[allow(non_snake_case)]
    fn sub(self, rhs: &'a Scalar) -> Scalar {
        // The UnpackedScalar::sub function produces reduced outputs if the inputs are reduced. By
        // Scalar invariant #2, this is always the case.
        UnpackedScalar::sub(&self.unpack(), &rhs.unpack()).pack()
    }
}

define_sub_variants!(LHS = Scalar, RHS = Scalar, Output = Scalar);

impl Neg for &Scalar {
    type Output = Scalar;
    #[allow(non_snake_case)]
    fn neg(self) -> Scalar {
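        // Reduce self mod l first: multiplying by R and then Montgomery-reducing
        // strips the R factor, leaving exactly self mod l. Subtracting the result
        // from zero then yields the reduced negation.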
        let self_R = UnpackedScalar::mul_internal(&self.unpack(), &constants::R);
        let self_mod_l = UnpackedScalar::montgomery_reduce(&self_R);
        UnpackedScalar::sub(&UnpackedScalar::ZERO, &self_mod_l).pack()
    }
}

impl Neg for Scalar {
    type Output = Scalar;
    fn neg(self) -> Scalar {
        -&self
    }
}

impl ConditionallySelectable for Scalar {
    fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
        let mut bytes = [0u8; 32];
        #[allow(clippy::needless_range_loop)]
        for i in 0..32 {
            bytes[i] = u8::conditional_select(&a.bytes[i], &b.bytes[i], choice);
        }
        Scalar { bytes }
    }
}

#[cfg(feature = "serde")]
use serde::de::Visitor;
#[cfg(feature = "serde")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};

#[cfg(feature = "serde")]
#[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
impl Serialize for Scalar {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        use serde::ser::SerializeTuple;
        let mut tup = serializer.serialize_tuple(32)?;
        for byte in self.as_bytes().iter() {
            tup.serialize_element(byte)?;
        }
        tup.end()
    }
}

#[cfg(feature = "serde")]
#[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
impl<'de> Deserialize<'de> for Scalar {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        struct ScalarVisitor;

        impl<'de> Visitor<'de> for ScalarVisitor {
            type Value = Scalar;

            fn expecting(&self, formatter: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
                formatter.write_str(
                    "a sequence of 32 bytes whose little-endian interpretation is less than the \
                    basepoint order ℓ",
                )
            }

            fn visit_seq<A>(self, mut seq: A) -> Result<Scalar, A::Error>
            where
                A: serde::de::SeqAccess<'de>,
            {
                let mut bytes = [0u8; 32];
                #[allow(clippy::needless_range_loop)]
                for i in 0..32 {
                    bytes[i] = seq
                        .next_element()?
                        .ok_or_else(|| serde::de::Error::invalid_length(i, &"expected 32 bytes"))?;
                }
                Option::from(Scalar::from_canonical_bytes(bytes))
                    .ok_or_else(|| serde::de::Error::custom("scalar was not canonically encoded"))
            }
        }

        deserializer.deserialize_tuple(32, ScalarVisitor)
    }
}

impl<T> Product<T> for Scalar
where
    T: Borrow<Scalar>,
{
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = T>,
    {
        iter.fold(Scalar::ONE, |acc, item| acc * item.borrow())
    }
}

impl<T> Sum<T> for Scalar
where
    T: Borrow<Scalar>,
{
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = T>,
    {
        iter.fold(Scalar::ZERO, |acc, item| acc + item.borrow())
    }
}

impl Default for Scalar {
    fn default() -> Scalar {
        Scalar::ZERO
    }
}

impl From<u8> for Scalar {
    fn from(x: u8) -> Scalar {
        let mut s_bytes = [0u8; 32];
        s_bytes[0] = x;
        Scalar { bytes: s_bytes }
    }
}

impl From<u16> for Scalar {
    fn from(x: u16) -> Scalar {
        let mut s_bytes = [0u8; 32];
        let x_bytes = x.to_le_bytes();
        s_bytes[0..x_bytes.len()].copy_from_slice(&x_bytes);
        Scalar { bytes: s_bytes }
    }
}

impl From<u32> for Scalar {
    fn from(x: u32) -> Scalar {
        let mut s_bytes = [0u8; 32];
        let x_bytes = x.to_le_bytes();
        s_bytes[0..x_bytes.len()].copy_from_slice(&x_bytes);
        Scalar { bytes: s_bytes }
    }
}

impl From<u64> for Scalar {
    /// Construct a scalar from the given `u64`.
    ///
    /// # Inputs
    ///
    /// A `u64` to convert to a `Scalar`.
    ///
    /// # Returns
    ///
    /// A `Scalar` corresponding to the input `u64`.
    ///
    /// # Example
    ///
    /// ```
    /// use curve25519_dalek::scalar::Scalar;
    ///
    /// let forty_two = Scalar::from(42u64);
    /// let six = Scalar::from(6u64);
    /// let seven = Scalar::from(7u64);
    ///
    /// assert!(forty_two == six * seven);
    /// ```
    fn from(x: u64) -> Scalar {
        let mut s_bytes = [0u8; 32];
        let x_bytes = x.to_le_bytes();
        s_bytes[0..x_bytes.len()].copy_from_slice(&x_bytes);
        Scalar { bytes: s_bytes }
    }
}

impl From<u128> for Scalar {
    fn from(x: u128) -> Scalar {
        let mut s_bytes = [0u8; 32];
        let x_bytes = x.to_le_bytes();
        s_bytes[0..x_bytes.len()].copy_from_slice(&x_bytes);
        Scalar { bytes: s_bytes }
    }
}

#[cfg(feature = "zeroize")]
impl Zeroize for Scalar {
    fn zeroize(&mut self) {
        self.bytes.zeroize();
    }
}

impl Scalar {
    /// The scalar \\( 0 \\).
    pub const ZERO: Self = Self { bytes: [0u8; 32] };

    /// The scalar \\( 1 \\).
    pub const ONE: Self = Self {
        bytes: [
            1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ],
    };

    #[cfg(any(test, feature = "rand_core"))]
    /// Return a `Scalar` chosen uniformly at random using a user-provided RNG.
    ///
    /// # Inputs
    ///
    /// * `rng`: any RNG that implements the `CryptoRngCore`
    ///   (i.e. `CryptoRng` + `RngCore`) interface.
    ///
    /// # Returns
    ///
    /// A random scalar within \\(\mathbb{Z} / \ell\mathbb{Z}\\).
    ///
    /// # Example
    ///
    /// ```
    /// # fn main() {
    /// use curve25519_dalek::scalar::Scalar;
    ///
    /// use rand_core::OsRng;
    ///
    /// let mut csprng = OsRng;
    /// let a: Scalar = Scalar::random(&mut csprng);
    /// # }
    /// ```
    pub fn random<R: CryptoRngCore + ?Sized>(rng: &mut R) -> Self {
        let mut scalar_bytes = [0u8; 64];
        rng.fill_bytes(&mut scalar_bytes);
        Scalar::from_bytes_mod_order_wide(&scalar_bytes)
    }

    #[cfg(feature = "digest")]
    /// Hash a slice of bytes into a scalar.
    ///
    /// Takes a type parameter `D`, which is any `Digest` producing 64
    /// bytes (512 bits) of output.
    ///
    /// Convenience wrapper around `from_hash`.
    ///
    /// # Example
    ///
    #[cfg_attr(feature = "digest", doc = "```")]
    #[cfg_attr(not(feature = "digest"), doc = "```ignore")]
    /// # use curve25519_dalek::scalar::Scalar;
    /// use sha2::Sha512;
    ///
    /// # // Need fn main() here in comment so the doctest compiles
    /// # // See https://doc.rust-lang.org/book/documentation.html#documentation-as-tests
    /// # fn main() {
    /// let msg = "To really appreciate architecture, you may even need to commit a murder";
    /// let s = Scalar::hash_from_bytes::<Sha512>(msg.as_bytes());
    /// # }
    /// ```
    pub fn hash_from_bytes<D>(input: &[u8]) -> Scalar
    where
        D: Digest<OutputSize = U64> + Default,
    {
        let mut hash = D::default();
        hash.update(input);
        Scalar::from_hash(hash)
    }

    #[cfg(feature = "digest")]
    /// Construct a scalar from an existing `Digest` instance.
    ///
    /// Use this instead of `hash_from_bytes` if it is more convenient
    /// to stream data into the `Digest` than to pass a single byte
    /// slice.
    ///
    /// # Example
    ///
    /// ```
    /// # use curve25519_dalek::scalar::Scalar;
    /// use curve25519_dalek::digest::Update;
    ///
    /// use sha2::Digest;
    /// use sha2::Sha512;
    ///
    /// # fn main() {
    /// let mut h = Sha512::new()
    ///     .chain("To really appreciate architecture, you may even need to commit a murder.")
    ///     .chain("While the programs used for The Manhattan Transcripts are of the most extreme")
    ///     .chain("nature, they also parallel the most common formula plot: the archetype of")
    ///     .chain("murder. Other phantasms were occasionally used to underline the fact that")
    ///     .chain("perhaps all architecture, rather than being about functional standards, is")
    ///     .chain("about love and death.");
    ///
    /// let s = Scalar::from_hash(h);
    ///
    /// println!("{:?}", s.to_bytes());
    /// assert_eq!(
    ///     s.to_bytes(),
    ///     [  21,  88, 208, 252,  63, 122, 210, 152,
    ///       154,  38,  15,  23,  16, 167,  80, 150,
    ///       192, 221,  77, 226,  62,  25, 224, 148,
    ///       239,  48, 176,  10, 185,  69, 168,  11, ],
    /// );
    /// # }
    /// ```
    pub fn from_hash<D>(hash: D) -> Scalar
    where
        D: Digest<OutputSize = U64>,
    {
        let mut output = [0u8; 64];
        output.copy_from_slice(hash.finalize().as_slice());
        Scalar::from_bytes_mod_order_wide(&output)
    }

    /// Convert this `Scalar` to its underlying sequence of bytes.
    ///
    /// # Example
    ///
    /// ```
    /// use curve25519_dalek::scalar::Scalar;
    ///
    /// let s: Scalar = Scalar::ZERO;
    ///
    /// assert!(s.to_bytes() == [0u8; 32]);
    /// ```
    pub const fn to_bytes(&self) -> [u8; 32] {
        self.bytes
    }

    /// View the little-endian byte encoding of the integer representing this Scalar.
    ///
    /// # Example
    ///
    /// ```
    /// use curve25519_dalek::scalar::Scalar;
    ///
    /// let s: Scalar = Scalar::ZERO;
    ///
    /// assert!(s.as_bytes() == &[0u8; 32]);
    /// ```
    pub const fn as_bytes(&self) -> &[u8; 32] {
        &self.bytes
    }

    /// Given a nonzero `Scalar`, compute its multiplicative inverse.
    ///
    /// # Warning
    ///
    /// `self` **MUST** be nonzero.  If you cannot
    /// *prove* that this is the case, you **SHOULD NOT USE THIS
    /// FUNCTION**.
    ///
    /// # Returns
    ///
    /// The multiplicative inverse of this `Scalar`.
    ///
    /// # Example
    ///
    /// ```
    /// use curve25519_dalek::scalar::Scalar;
    ///
    /// // x = 2238329342913194256032495932344128051776374960164957527413114840482143558222
    /// let X: Scalar = Scalar::from_bytes_mod_order([
    ///         0x4e, 0x5a, 0xb4, 0x34, 0x5d, 0x47, 0x08, 0x84,
    ///         0x59, 0x13, 0xb4, 0x64, 0x1b, 0xc2, 0x7d, 0x52,
    ///         0x52, 0xa5, 0x85, 0x10, 0x1b, 0xcc, 0x42, 0x44,
    ///         0xd4, 0x49, 0xf4, 0xa8, 0x79, 0xd9, 0xf2, 0x04,
    ///     ]);
    /// // 1/x = 6859937278830797291664592131120606308688036382723378951768035303146619657244
    /// let XINV: Scalar = Scalar::from_bytes_mod_order([
    ///         0x1c, 0xdc, 0x17, 0xfc, 0xe0, 0xe9, 0xa5, 0xbb,
    ///         0xd9, 0x24, 0x7e, 0x56, 0xbb, 0x01, 0x63, 0x47,
    ///         0xbb, 0xba, 0x31, 0xed, 0xd5, 0xa9, 0xbb, 0x96,
    ///         0xd5, 0x0b, 0xcd, 0x7a, 0x3f, 0x96, 0x2a, 0x0f,
    ///     ]);
    ///
    /// let inv_X: Scalar = X.invert();
    /// assert!(XINV == inv_X);
    /// let should_be_one: Scalar = &inv_X * &X;
    /// assert!(should_be_one == Scalar::ONE);
    /// ```
    pub fn invert(&self) -> Scalar {
        self.unpack().invert().pack()
    }

    /// Given a slice of nonzero (possibly secret) `Scalar`s,
    /// compute their inverses in a batch.
    ///
    /// # Return
    ///
    /// Each element of `inputs` is replaced by its inverse.
    ///
    /// The product of all inverses is returned.
    ///
    /// # Warning
    ///
    /// All input `Scalars` **MUST** be nonzero.  If you cannot
    /// *prove* that this is the case, you **SHOULD NOT USE THIS
    /// FUNCTION**.
    ///
    /// # Example
    ///
    /// ```
    /// # use curve25519_dalek::scalar::Scalar;
    /// # fn main() {
    /// let mut scalars = [
    ///     Scalar::from(3u64),
    ///     Scalar::from(5u64),
    ///     Scalar::from(7u64),
    ///     Scalar::from(11u64),
    /// ];
    ///
    /// let allinv = Scalar::batch_invert(&mut scalars);
    ///
    /// assert_eq!(allinv, Scalar::from(3*5*7*11u64).invert());
    /// assert_eq!(scalars[0], Scalar::from(3u64).invert());
    /// assert_eq!(scalars[1], Scalar::from(5u64).invert());
    /// assert_eq!(scalars[2], Scalar::from(7u64).invert());
    /// assert_eq!(scalars[3], Scalar::from(11u64).invert());
    /// # }
    /// ```
    #[cfg(feature = "alloc")]
    pub fn batch_invert(inputs: &mut [Scalar]) -> Scalar {
        // This code is essentially identical to the FieldElement
        // implementation, and is documented there.  Unfortunately,
        // it's not easy to write it generically, since here we want
        // to use `UnpackedScalar`s internally, and `Scalar`s
        // externally, but there's no corresponding distinction for
        // field elements.

        let n = inputs.len();
        let one: UnpackedScalar = Scalar::ONE.unpack().as_montgomery();

        let mut scratch = vec![one; n];

        // Keep an accumulator of all of the previous products
        let mut acc = Scalar::ONE.unpack().as_montgomery();

        // Pass through the input vector, recording the previous
        // products in the scratch space
        for (input, scratch) in inputs.iter_mut().zip(scratch.iter_mut()) {
            *scratch = acc;

            // Avoid unnecessary Montgomery multiplication in second pass by
            // keeping inputs in Montgomery form
            let tmp = input.unpack().as_montgomery();
            *input = tmp.pack();
            acc = UnpackedScalar::montgomery_mul(&acc, &tmp);
        }

        // acc is nonzero iff all inputs are nonzero
        debug_assert!(acc.pack() != Scalar::ZERO);

        // Compute the inverse of all products
        acc = acc.montgomery_invert().from_montgomery();

        // We need to return the product of all inverses later
        let ret = acc.pack();

        // Pass through the vector backwards to compute the inverses
        // in place
        for (input, scratch) in inputs.iter_mut().rev().zip(scratch.iter().rev()) {
            let tmp = UnpackedScalar::montgomery_mul(&acc, &input.unpack());
            *input = UnpackedScalar::montgomery_mul(&acc, scratch).pack();
            acc = tmp;
        }

        #[cfg(feature = "zeroize")]
        Zeroize::zeroize(&mut scratch);

        ret
    }

    /// Get the bits of the scalar, in little-endian order
    pub(crate) fn bits_le(&self) -> impl DoubleEndedIterator<Item = bool> + '_ {
        (0..256).map(|i| {
            // As i runs from 0..256, the bottom 3 bits index the bit, while the upper bits index
            // the byte. Since self.bytes is little-endian at the byte level, this iterator is
            // little-endian on the bit level
            ((self.bytes[i >> 3] >> (i & 7)) & 1u8) == 1
        })
    }

    /// Compute a width-\\(w\\) "Non-Adjacent Form" of this scalar.
    ///
    /// A width-\\(w\\) NAF of a positive integer \\(k\\) is an expression
    /// $$
    /// k = \sum_{i=0}\^m n\_i 2\^i,
    /// $$
    /// where each nonzero
    /// coefficient \\(n\_i\\) is odd and bounded by \\(|n\_i| < 2\^{w-1}\\),
    /// \\(n\_{m-1}\\) is nonzero, and at most one of any \\(w\\) consecutive
    /// coefficients is nonzero.  (Hankerson, Menezes, Vanstone; def 3.32).
    ///
    /// The length of the NAF is at most one more than the length of
    /// the binary representation of \\(k\\).  This is why the
    /// `Scalar` type maintains an invariant (invariant #1) that the top bit is
    /// \\(0\\), so that the NAF of a scalar has at most 256 digits.
    ///
    /// Intuitively, this is like a binary expansion, except that we
    /// allow some coefficients to grow in magnitude up to
    /// \\(2\^{w-1}\\) so that the nonzero coefficients are as sparse
    /// as possible.
    ///
    /// When doing scalar multiplication, we can then use a lookup
    /// table of precomputed multiples of a point to add the nonzero
    /// terms \\( k_i P \\).  Using signed digits cuts the table size
    /// in half, and using odd digits cuts the table size in half
    /// again.
    ///
    /// To compute a \\(w\\)-NAF, we use a modification of Algorithm 3.35 of HMV:
    ///
    /// 1. \\( i \gets 0 \\)
    /// 2. While \\( k \ge 1 \\):
    ///     1. If \\(k\\) is odd, \\( n_i \gets k \operatorname{mods} 2^w \\), \\( k \gets k - n_i \\).
    ///     2. If \\(k\\) is even, \\( n_i \gets 0 \\).
    ///     3. \\( k \gets k / 2 \\), \\( i \gets i + 1 \\).
    /// 3. Return \\( n_0, n_1, ... , \\)
    ///
    /// Here \\( \bar x = x \operatorname{mods} 2^w \\) means the
    /// \\( \bar x \\) with \\( \bar x \equiv x \pmod{2^w} \\) and
    /// \\( -2^{w-1} \leq \bar x < 2^{w-1} \\).
    ///
    /// We implement this by scanning across the bits of \\(k\\) from
    /// least-significant bit to most-significant-bit.
    /// Write the bits of \\(k\\) as
    /// $$
    /// k = \sum\_{i=0}\^m k\_i 2^i,
    /// $$
    /// and split the sum as
    /// $$
    /// k = \sum\_{i=0}^{w-1} k\_i 2^i + 2^w \sum\_{i=0} k\_{i+w} 2^i
    /// $$
    /// where the first part is \\( k \mod 2^w \\).
    ///
    /// If \\( k \mod 2^w\\) is odd, and \\( k \mod 2^w < 2^{w-1} \\), then we emit
    /// \\( n_0 = k \mod 2^w \\).  Instead of computing
    /// \\( k - n_0 \\), we just advance \\(w\\) bits and reindex.
    ///
    /// If \\( k \mod 2^w\\) is odd, and \\( k \mod 2^w \ge 2^{w-1} \\), then
    /// \\( n_0 = k \operatorname{mods} 2^w = k \mod 2^w - 2^w \\).
    /// The quantity \\( k - n_0 \\) is
    /// $$
    /// \begin{aligned}
    /// k - n_0 &= \sum\_{i=0}^{w-1} k\_i 2^i + 2^w \sum\_{i=0} k\_{i+w} 2^i
    ///          - \sum\_{i=0}^{w-1} k\_i 2^i + 2^w \\\\
    /// &= 2^w + 2^w \sum\_{i=0} k\_{i+w} 2^i
    /// \end{aligned}
    /// $$
    /// so instead of computing the subtraction, we can set a carry
    /// bit, advance \\(w\\) bits, and reindex.
    ///
    /// If \\( k \mod 2^w\\) is even, we emit \\(0\\), advance 1 bit
    /// and reindex.  In fact, by setting all digits to \\(0\\)
    /// initially, we don't need to emit anything.
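    ///
    /// For example, with \\( w = 5 \\): \\( k = 7 \\) is odd and \\( 7 < 2^4 \\), so we
    /// emit \\( n_0 = 7 \\) with no carry; \\( k = 29 \\) is odd and \\( 29 \ge 2^4 \\), so
    /// we emit \\( n_0 = 29 - 32 = -3 \\), set the carry, and later emit \\( n_5 = 1 \\)
    /// (indeed \\( 29 = -3 + 2^5 \\)).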
    pub(crate) fn non_adjacent_form(&self, w: usize) -> [i8; 256] {
        // required by the NAF definition
        debug_assert!(w >= 2);
        // required so that the NAF digits fit in i8
        debug_assert!(w <= 8);

        let mut naf = [0i8; 256];

        let mut x_u64 = [0u64; 5];
        read_le_u64_into(&self.bytes, &mut x_u64[0..4]);

        let width = 1 << w;
        let window_mask = width - 1;

        let mut pos = 0;
        let mut carry = 0;
        while pos < 256 {
            // Construct a buffer of bits of the scalar, starting at bit `pos`
            let u64_idx = pos / 64;
            let bit_idx = pos % 64;
            let bit_buf: u64 = if bit_idx < 64 - w {
                // This window's bits are contained in a single u64
                x_u64[u64_idx] >> bit_idx
            } else {
                // Combine the current u64's bits with the bits from the next u64
                (x_u64[u64_idx] >> bit_idx) | (x_u64[1 + u64_idx] << (64 - bit_idx))
            };

            // Add the carry into the current window
            let window = carry + (bit_buf & window_mask);

            if window & 1 == 0 {
                // If the window value is even, preserve the carry and continue.
                // Why is the carry preserved?
                // If carry == 0 and window & 1 == 0, then the next carry should be 0
                // If carry == 1 and window & 1 == 0, then bit_buf & 1 == 1 so the next carry should be 1
                pos += 1;
                continue;
            }

            if window < width / 2 {
                carry = 0;
                naf[pos] = window as i8;
            } else {
                carry = 1;
                naf[pos] = (window as i8).wrapping_sub(width as i8);
            }

            pos += w;
        }

        naf
    }

    /// Write this scalar in radix 16, with coefficients in \\([-8,8)\\),
    /// i.e., compute \\(a\_i\\) such that
    /// $$
    ///    a = a\_0 + a\_1 16\^1 + \cdots + a_{63} 16\^{63},
    /// $$
    /// with \\(-8 \leq a_i < 8\\) for \\(0 \leq i < 63\\) and \\(-8 \leq a_{63} \leq 8\\).
    ///
    /// The largest value that can be decomposed like this is just over \\(2^{255}\\). Thus, in
    /// order to not error, the top bit MUST NOT be set, i.e., `Self` MUST be less than
    /// \\(2^{255}\\).
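    ///
    /// For example, the byte \\( 255 = 15 + 15 \cdot 16 \\) is recentered to
    /// \\( 255 = -1 + 0 \cdot 16 + 1 \cdot 16^2 \\).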
    pub(crate) fn as_radix_16(&self) -> [i8; 64] {
        debug_assert!(self[31] <= 127);
        let mut output = [0i8; 64];

        // Step 1: change radix.
        // Convert from radix 256 (bytes) to radix 16 (nibbles)
        #[allow(clippy::identity_op)]
        #[inline(always)]
        fn bot_half(x: u8) -> u8 {
            (x >> 0) & 15
        }
        #[inline(always)]
        fn top_half(x: u8) -> u8 {
            (x >> 4) & 15
        }

        for i in 0..32 {
            output[2 * i] = bot_half(self[i]) as i8;
            output[2 * i + 1] = top_half(self[i]) as i8;
        }
        // Precondition note: since self[31] <= 127, output[63] <= 7

        // Step 2: recenter coefficients from [0,16) to [-8,8)
        for i in 0..63 {
            let carry = (output[i] + 8) >> 4;
            output[i] -= carry << 4;
            output[i + 1] += carry;
        }
        // Precondition note: output[63] is not recentered.  It
        // increases by carry <= 1.  Thus output[63] <= 8.

        output
    }

    /// Returns a size hint indicating how many entries of the return
    /// value of `as_radix_2w` are nonzero.
    #[cfg(any(feature = "alloc", all(test, feature = "precomputed-tables")))]
    pub(crate) fn to_radix_2w_size_hint(w: usize) -> usize {
        debug_assert!(w >= 4);
        debug_assert!(w <= 8);

        let digits_count = match w {
            4..=7 => (256 + w - 1) / w,
            // See comment in as_radix_2w on handling the terminal carry.
            8 => (256 + w - 1) / w + 1_usize,
            _ => panic!("invalid radix parameter"),
        };

        debug_assert!(digits_count <= 64);
        digits_count
    }

    /// Creates a representation of a Scalar in radix \\( 2^w \\) with \\(w = 4, 5, 6, 7, 8\\) for
    /// use with the Pippenger algorithm. Higher radixes are not supported to save cache space.
    /// Radix 256 is near-optimal even for very large inputs.
    ///
    /// Radix below 16 or above 256 is prohibited.
    /// This method returns digits in a fixed-size array; excess digits are zeroes.
    ///
    /// For radix 16, `Self` must be less than \\(2^{255}\\). This is because most integers larger
    /// than \\(2^{255}\\) are unrepresentable in the form described below for \\(w = 4\\). This
    /// would be true for \\(w = 8\\) as well, but it is compensated for by increasing the size
    /// hint by 1.
    ///
    /// ## Scalar representation
    ///
    /// Radix \\(2\^w\\), with \\(n = \lceil 256/w \rceil\\) coefficients in \\([-(2\^w)/2,(2\^w)/2)\\),
    /// i.e., the scalar is represented using digits \\(a\_i\\) such that
    /// $$
    ///    a = a\_0 + a\_1 2\^{w} + \cdots + a_{n-1} 2\^{w(n-1)},
    /// $$
    /// with \\(-2\^w/2 \leq a_i < 2\^w/2\\) for \\(0 \leq i < (n-1)\\) and \\(-2\^w/2 \leq a_{n-1} \leq 2\^w/2\\).
    ///
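    /// For example, with \\(w = 5\\) there are \\(n = \lceil 256/5 \rceil = 52\\)
    /// digits; the remaining \\(12\\) entries of the 64-entry return array stay zero.
    ///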
    #[cfg(any(feature = "alloc", feature = "precomputed-tables"))]
    pub(crate) fn as_radix_2w(&self, w: usize) -> [i8; 64] {
        debug_assert!(w >= 4);
        debug_assert!(w <= 8);

        if w == 4 {
            return self.as_radix_16();
        }

        // Scalar formatted as four `u64`s with carry bit packed into the highest bit.
        let mut scalar64x4 = [0u64; 4];
        read_le_u64_into(&self.bytes, &mut scalar64x4[0..4]);

        let radix: u64 = 1 << w;
        let window_mask: u64 = radix - 1;

        let mut carry = 0u64;
        let mut digits = [0i8; 64];
        let digits_count = (256 + w - 1) / w;
        #[allow(clippy::needless_range_loop)]
        for i in 0..digits_count {
            // Construct a buffer of bits of the scalar, starting at `bit_offset`.
            let bit_offset = i * w;
            let u64_idx = bit_offset / 64;
            let bit_idx = bit_offset % 64;

            // Read the bits from the scalar
            let bit_buf: u64 = if bit_idx < 64 - w || u64_idx == 3 {
                // This window's bits are contained in a single u64,
                // or it's the last u64 anyway.
                scalar64x4[u64_idx] >> bit_idx
            } else {
                // Combine the current u64's bits with the bits from the next u64
                (scalar64x4[u64_idx] >> bit_idx) | (scalar64x4[1 + u64_idx] << (64 - bit_idx))
            };

            // Read the actual coefficient value from the window
            let coef = carry + (bit_buf & window_mask); // coef = [0, 2^r)

            // Recenter coefficients from [0,2^w) to [-2^w/2, 2^w/2)
            carry = (coef + (radix / 2)) >> w;
            digits[i] = ((coef as i64) - (carry << w) as i64) as i8;
        }

        // When 4 < w < 8, we can fold the final carry onto the last digit d,
        // because d < 2^w/2 so d + carry*2^w = d + 1*2^w < 2^(w+1) < 2^8.
        //
        // When w = 8, we can't fit carry*2^w into an i8.  This should
        // not happen anyways, because the final carry will be 0 for
        // reduced scalars, but Scalar invariant #1 allows 255-bit scalars.
        // To handle this, we expand the size_hint by 1 when w=8,
        // and accumulate the final carry onto another digit.
        match w {
            8 => digits[digits_count] += carry as i8,
            _ => digits[digits_count - 1] += (carry << w) as i8,
        }

        digits
    }

    /// Unpack this `Scalar` to an `UnpackedScalar` for faster arithmetic.
    pub(crate) fn unpack(&self) -> UnpackedScalar {
        UnpackedScalar::from_bytes(&self.bytes)
    }

    /// Reduce this `Scalar` modulo \\(\ell\\).
    #[allow(non_snake_case)]
    fn reduce(&self) -> Scalar {
        let x = self.unpack();
        let xR = UnpackedScalar::mul_internal(&x, &constants::R);
        let x_mod_l = UnpackedScalar::montgomery_reduce(&xR);
        x_mod_l.pack()
    }

    /// Check whether this `Scalar` is the canonical representative mod \\(\ell\\). This is not
    /// public because any `Scalar` that is publicly observed is reduced, by scalar invariant #2.
    fn is_canonical(&self) -> Choice {
        self.ct_eq(&self.reduce())
    }
}

impl UnpackedScalar {
    /// Pack the limbs of this `UnpackedScalar` into a `Scalar`.
    fn pack(&self) -> Scalar {
        Scalar {
            bytes: self.to_bytes(),
        }
    }

    /// Inverts an UnpackedScalar in Montgomery form.
    #[rustfmt::skip] // keep alignment of addition chain and squarings
    #[allow(clippy::just_underscores_and_digits)]
    pub fn montgomery_invert(&self) -> UnpackedScalar {
        // Uses the addition chain from
        // https://briansmith.org/ecc-inversion-addition-chains-01#curve25519_scalar_inversion
        let    _1 = *self;
        let   _10 = _1.montgomery_square();
        let  _100 = _10.montgomery_square();
        let   _11 = UnpackedScalar::montgomery_mul(&_10,     &_1);
        let  _101 = UnpackedScalar::montgomery_mul(&_10,    &_11);
        let  _111 = UnpackedScalar::montgomery_mul(&_10,   &_101);
        let _1001 = UnpackedScalar::montgomery_mul(&_10,   &_111);
        let _1011 = UnpackedScalar::montgomery_mul(&_10,  &_1001);
        let _1111 = UnpackedScalar::montgomery_mul(&_100, &_1011);

        // _10000
        let mut y = UnpackedScalar::montgomery_mul(&_1111, &_1);

        #[inline]
        fn square_multiply(y: &mut UnpackedScalar, squarings: usize, x: &UnpackedScalar) {
            let mut i = 0;
            while i < squarings {
                *y = y.montgomery_square();
                i += 1;
            }
            *y = UnpackedScalar::montgomery_mul(y, x);
        }

        square_multiply(&mut y, 123 + 3, &_101);
        square_multiply(&mut y,   2 + 2, &_11);
        square_multiply(&mut y,   1 + 4, &_1111);
        square_multiply(&mut y,   1 + 4, &_1111);
        square_multiply(&mut y,       4, &_1001);
        square_multiply(&mut y,       2, &_11);
        square_multiply(&mut y,   1 + 4, &_1111);
        square_multiply(&mut y,   1 + 3, &_101);
        square_multiply(&mut y,   3 + 3, &_101);
        square_multiply(&mut y,       3, &_111);
        square_multiply(&mut y,   1 + 4, &_1111);
        square_multiply(&mut y,   2 + 3, &_111);
        square_multiply(&mut y,   2 + 2, &_11);
        square_multiply(&mut y,   1 + 4, &_1011);
        square_multiply(&mut y,   2 + 4, &_1011);
        square_multiply(&mut y,   6 + 4, &_1001);
        square_multiply(&mut y,   2 + 2, &_11);
        square_multiply(&mut y,   3 + 2, &_11);
        square_multiply(&mut y,   3 + 2, &_11);
        square_multiply(&mut y,   1 + 4, &_1001);
        square_multiply(&mut y,   1 + 3, &_111);
        square_multiply(&mut y,   2 + 4, &_1111);
        square_multiply(&mut y,   1 + 4, &_1011);
        square_multiply(&mut y,       3, &_101);
        square_multiply(&mut y,   2 + 4, &_1111);
        square_multiply(&mut y,       3, &_101);
        square_multiply(&mut y,   1 + 2, &_11);

        y
    }

    /// Inverts an UnpackedScalar not in Montgomery form.
    pub fn invert(&self) -> UnpackedScalar {
        self.as_montgomery().montgomery_invert().from_montgomery()
    }
}

#[cfg(feature = "group")]
impl Field for Scalar {
    const ZERO: Self = Self::ZERO;
    const ONE: Self = Self::ONE;

    fn random(mut rng: impl RngCore) -> Self {
        // NOTE: this is duplicated due to different `rng` bounds
        let mut scalar_bytes = [0u8; 64];
        rng.fill_bytes(&mut scalar_bytes);
        Self::from_bytes_mod_order_wide(&scalar_bytes)
    }

    fn square(&self) -> Self {
        self * self
    }

    fn double(&self) -> Self {
        self + self
    }

    fn invert(&self) -> CtOption<Self> {
        CtOption::new(self.invert(), !self.is_zero())
    }

    fn sqrt_ratio(num: &Self, div: &Self) -> (Choice, Self) {
        #[allow(unused_qualifications)]
        group::ff::helpers::sqrt_ratio_generic(num, div)
    }

    fn sqrt(&self) -> CtOption<Self> {
        #[allow(unused_qualifications)]
        group::ff::helpers::sqrt_tonelli_shanks(
            self,
            [
                0xcb02_4c63_4b9e_ba7d,
                0x029b_df3b_d45e_f39a,
                0x0000_0000_0000_0000,
                0x0200_0000_0000_0000,
            ],
        )
    }
}

#[cfg(feature = "group")]
impl PrimeField for Scalar {
    type Repr = [u8; 32];

    fn from_repr(repr: Self::Repr) -> CtOption<Self> {
        Self::from_canonical_bytes(repr)
    }

    fn from_repr_vartime(repr: Self::Repr) -> Option<Self> {
        // Check that the high bit is not set
        if (repr[31] >> 7) != 0u8 {
            return None;
        }

        let candidate = Scalar { bytes: repr };

        if candidate == candidate.reduce() {
            Some(candidate)
        } else {
            None
        }
    }

    fn to_repr(&self) -> Self::Repr {
        self.to_bytes()
    }

    fn is_odd(&self) -> Choice {
        Choice::from(self.as_bytes()[0] & 1)
    }

    const MODULUS: &'static str =
        "0x1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed";
    const NUM_BITS: u32 = 253;
    const CAPACITY: u32 = 252;

    const TWO_INV: Self = Self {
        bytes: [
            0xf7, 0xe9, 0x7a, 0x2e, 0x8d, 0x31, 0x09, 0x2c, 0x6b, 0xce, 0x7b, 0x51, 0xef, 0x7c,
            0x6f, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x08,
        ],
    };
    const MULTIPLICATIVE_GENERATOR: Self = Self {
        bytes: [
            2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ],
    };
    const S: u32 = 2;
    const ROOT_OF_UNITY: Self = Self {
        bytes: [
            0xd4, 0x07, 0xbe, 0xeb, 0xdf, 0x75, 0x87, 0xbe, 0xfe, 0x83, 0xce, 0x42, 0x53, 0x56,
            0xf0, 0x0e, 0x7a, 0xc2, 0xc1, 0xab, 0x60, 0x6d, 0x3d, 0x7d, 0xe7, 0x81, 0x79, 0xe0,
            0x10, 0x73, 0x4a, 0x09,
        ],
    };
    const ROOT_OF_UNITY_INV: Self = Self {
        bytes: [
            0x19, 0xcc, 0x37, 0x71, 0x3a, 0xed, 0x8a, 0x99, 0xd7, 0x18, 0x29, 0x60, 0x8b, 0xa3,
            0xee, 0x05, 0x86, 0x3d, 0x3e, 0x54, 0x9f, 0x92, 0xc2, 0x82, 0x18, 0x7e, 0x86, 0x1f,
            0xef, 0x8c, 0xb5, 0x06,
        ],
    };
    const DELTA: Self = Self {
        bytes: [
            16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ],
    };
}

#[cfg(feature = "group-bits")]
impl PrimeFieldBits for Scalar {
    type ReprBits = [u8; 32];

    fn to_le_bits(&self) -> FieldBits<Self::ReprBits> {
        self.to_repr().into()
    }

    fn char_le_bits() -> FieldBits<Self::ReprBits> {
        constants::BASEPOINT_ORDER_PRIVATE.to_bytes().into()
    }
}

#[cfg(feature = "group")]
impl FromUniformBytes<64> for Scalar {
    fn from_uniform_bytes(bytes: &[u8; 64]) -> Self {
        Scalar::from_bytes_mod_order_wide(bytes)
    }
}

/// Read one or more u64s stored as little endian bytes.
///
/// ## Panics
/// Panics if `src.len() != 8 * dst.len()`.
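///
/// For example, `read_le_u64_into(&[1, 0, 0, 0, 0, 0, 0, 0], &mut dst)` with a
/// one-element `dst` writes `dst[0] = 1`.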
fn read_le_u64_into(src: &[u8], dst: &mut [u64]) {
    assert!(src.len() == 8 * dst.len());
    let mut i = 0;
    while i < dst.len() {
        let start = i * 8;
        dst[i] = u64::from_le_bytes([
            src[start],
            src[start + 1],
            src[start + 2],
            src[start + 3],
            src[start + 4],
            src[start + 5],
            src[start + 6],
            src[start + 7],
        ]);
        i += 1;
    }
}

/// _Clamps_ the given little-endian representation of a 32-byte integer. Clamping the value puts
/// it in the range:
///
/// **n ∈ 2^254 + 8\*{0, 1, 2, 3, . . ., 2^251 − 1}**
///
/// # Explanation of clamping
///
/// For Curve25519, h = 8, and multiplying by 8 is the same as a binary left-shift by 3 bits.
/// If you take a secret scalar value between 2^251 and 2^252 - 1 and left-shift by 3 bits
/// then you end up with a 255-bit number with the most significant bit set to 1 and
/// the least-significant three bits set to 0.
///
/// The Curve25519 clamping operation takes **an arbitrary 256-bit random value** and
/// clears the most-significant bit (making it a 255-bit number), sets the next bit, and then
/// clears the 3 least-significant bits. In other words, it directly creates a scalar value that is
/// in the right form and pre-multiplied by the cofactor.
///
/// See [here](https://neilmadden.blog/2020/05/28/whats-the-curve25519-clamping-all-about/) for
/// more details.
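///
/// A quick check of the fixed bits on an all-ones input:
///
/// ```
/// use curve25519_dalek::scalar::clamp_integer;
///
/// let clamped = clamp_integer([0xff; 32]);
/// assert_eq!(clamped[0], 0b1111_1000); // low three bits cleared
/// assert_eq!(clamped[31], 0b0111_1111); // top bit cleared, bit 254 still set
/// ```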
1390#[must_use]
1391pub const fn clamp_integer(mut bytes: [u8; 32]) -> [u8; 32] {
1392    bytes[0] &= 0b1111_1000;
1393    bytes[31] &= 0b0111_1111;
1394    bytes[31] |= 0b0100_0000;
1395    bytes
1396}
1397
1398#[cfg(test)]
1399pub(crate) mod test {
1400    use super::*;
1401
1402    #[cfg(feature = "alloc")]
1403    use alloc::vec::Vec;
1404
1405    /// x = 2238329342913194256032495932344128051776374960164957527413114840482143558222
1406    pub static X: Scalar = Scalar {
1407        bytes: [
1408            0x4e, 0x5a, 0xb4, 0x34, 0x5d, 0x47, 0x08, 0x84, 0x59, 0x13, 0xb4, 0x64, 0x1b, 0xc2,
1409            0x7d, 0x52, 0x52, 0xa5, 0x85, 0x10, 0x1b, 0xcc, 0x42, 0x44, 0xd4, 0x49, 0xf4, 0xa8,
1410            0x79, 0xd9, 0xf2, 0x04,
1411        ],
1412    };
1413    /// 1/x = 6859937278830797291664592131120606308688036382723378951768035303146619657244
1414    pub static XINV: Scalar = Scalar {
1415        bytes: [
1416            0x1c, 0xdc, 0x17, 0xfc, 0xe0, 0xe9, 0xa5, 0xbb, 0xd9, 0x24, 0x7e, 0x56, 0xbb, 0x01,
1417            0x63, 0x47, 0xbb, 0xba, 0x31, 0xed, 0xd5, 0xa9, 0xbb, 0x96, 0xd5, 0x0b, 0xcd, 0x7a,
1418            0x3f, 0x96, 0x2a, 0x0f,
1419        ],
1420    };
1421    /// y = 2592331292931086675770238855846338635550719849568364935475441891787804997264
1422    pub static Y: Scalar = Scalar {
1423        bytes: [
1424            0x90, 0x76, 0x33, 0xfe, 0x1c, 0x4b, 0x66, 0xa4, 0xa2, 0x8d, 0x2d, 0xd7, 0x67, 0x83,
1425            0x86, 0xc3, 0x53, 0xd0, 0xde, 0x54, 0x55, 0xd4, 0xfc, 0x9d, 0xe8, 0xef, 0x7a, 0xc3,
1426            0x1f, 0x35, 0xbb, 0x05,
1427        ],
1428    };
1429
1430    /// The largest scalar that satisfies invariant #1, i.e., the largest scalar with the top bit
1431    /// set to 0. Since this scalar violates invariant #2, i.e., it's greater than the modulus `l`,
1432    /// addition and subtraction are broken. The only thing you can do with this is scalar-point
1433    /// multiplication (and actually also scalar-scalar multiplication, but that's just a quirk of
1434    /// our implementation).
1435    pub(crate) static LARGEST_UNREDUCED_SCALAR: Scalar = Scalar {
1436        bytes: [
1437            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1438            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1439            0xff, 0xff, 0xff, 0x7f,
1440        ],
1441    };
1442
1443    /// x*y = 5690045403673944803228348699031245560686958845067437804563560795922180092780
1444    static X_TIMES_Y: Scalar = Scalar {
1445        bytes: [
1446            0x6c, 0x33, 0x74, 0xa1, 0x89, 0x4f, 0x62, 0x21, 0x0a, 0xaa, 0x2f, 0xe1, 0x86, 0xa6,
1447            0xf9, 0x2c, 0xe0, 0xaa, 0x75, 0xc2, 0x77, 0x95, 0x81, 0xc2, 0x95, 0xfc, 0x08, 0x17,
1448            0x9a, 0x73, 0x94, 0x0c,
1449        ],
1450    };
1451
1452    /// sage: l = 2^252 + 27742317777372353535851937790883648493
1453    /// sage: big = 2^256 - 1
1454    /// sage: repr((big % l).digits(256))
1455    static CANONICAL_2_256_MINUS_1: Scalar = Scalar {
1456        bytes: [
1457            28, 149, 152, 141, 116, 49, 236, 214, 112, 207, 125, 115, 244, 91, 239, 198, 254, 255,
1458            255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 15,
1459        ],
1460    };
1461
1462    static A_SCALAR: Scalar = Scalar {
1463        bytes: [
1464            0x1a, 0x0e, 0x97, 0x8a, 0x90, 0xf6, 0x62, 0x2d, 0x37, 0x47, 0x02, 0x3f, 0x8a, 0xd8,
1465            0x26, 0x4d, 0xa7, 0x58, 0xaa, 0x1b, 0x88, 0xe0, 0x40, 0xd1, 0x58, 0x9e, 0x7b, 0x7f,
1466            0x23, 0x76, 0xef, 0x09,
1467        ],
1468    };
1469
1470    static A_NAF: [i8; 256] = [
1471        0, 13, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, -9, 0, 0, 0, 0, -11, 0, 0, 0, 0, 3, 0, 0,
1472        0, 0, 1, 0, 0, 0, 0, 9, 0, 0, 0, 0, -5, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 11, 0, 0, 0, 0,
1473        11, 0, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, -3, 0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
1474        0, -1, 0, 0, 0, 0, 0, 9, 0, 0, 0, 0, -15, 0, 0, 0, 0, -7, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, 5,
1475        0, 0, 0, 0, 13, 0, 0, 0, 0, 0, -3, 0, 0, 0, 0, -11, 0, 0, 0, 0, -7, 0, 0, 0, 0, -13, 0, 0,
1476        0, 0, 11, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, -15, 0, 0, 0, 0, 1, 0, 0, 0, 0,
1477        7, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 15,
1478        0, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, -15, 0,
1479        0, 0, 0, 0, 15, 0, 0, 0, 0, 15, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
1480    ];
1481
1482    const BASEPOINT_ORDER_MINUS_ONE: Scalar = Scalar {
1483        bytes: [
1484            0xec, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58, 0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9,
1485            0xde, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1486            0x00, 0x00, 0x00, 0x10,
1487        ],
1488    };
1489
1490    /// The largest clamped integer
1491    static LARGEST_CLAMPED_INTEGER: [u8; 32] = clamp_integer(LARGEST_UNREDUCED_SCALAR.bytes);
1492
1493    #[test]
1494    fn fuzzer_testcase_reduction() {
1495        // LE bytes of 24519928653854221733733552434404946937899825954937634815
1496        let a_bytes = [
1497            255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
1498            255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1499        ];
1500        // LE bytes of 4975441334397345751130612518500927154628011511324180036903450236863266160640
1501        let b_bytes = [
1502            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 210, 210,
1503            210, 255, 255, 255, 255, 10,
1504        ];
1505        // LE bytes of 6432735165214683820902750800207468552549813371247423777071615116673864412038
1506        let c_bytes = [
1507            134, 171, 119, 216, 180, 128, 178, 62, 171, 132, 32, 62, 34, 119, 104, 193, 47, 215,
1508            181, 250, 14, 207, 172, 93, 75, 207, 211, 103, 144, 204, 56, 14,
1509        ];
1510
1511        let a = Scalar::from_bytes_mod_order(a_bytes);
1512        let b = Scalar::from_bytes_mod_order(b_bytes);
1513        let c = Scalar::from_bytes_mod_order(c_bytes);
1514
1515        let mut tmp = [0u8; 64];
1516
1517        // also_a = (a mod l)
1518        tmp[0..32].copy_from_slice(&a_bytes[..]);
1519        let also_a = Scalar::from_bytes_mod_order_wide(&tmp);
1520
1521        // also_b = (b mod l)
1522        tmp[0..32].copy_from_slice(&b_bytes[..]);
1523        let also_b = Scalar::from_bytes_mod_order_wide(&tmp);
1524
1525        let expected_c = a * b;
1526        let also_expected_c = also_a * also_b;
1527
1528        assert_eq!(c, expected_c);
1529        assert_eq!(c, also_expected_c);
1530    }
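
    // A randomized, hedged extension of the regression test above: reducing 32 bytes
    // directly must agree with reducing the same bytes zero-padded to 64. This is a
    // sketch of the same property, not an upstream test vector.
    #[test]
    fn reduce_narrow_matches_wide() {
        let mut rng = rand::thread_rng();
        for _ in 0..100 {
            let mut bytes = [0u8; 32];
            rng.fill_bytes(&mut bytes);

            // Zero-pad the same bytes to a 512-bit input
            let mut wide = [0u8; 64];
            wide[..32].copy_from_slice(&bytes);

            assert_eq!(
                Scalar::from_bytes_mod_order(bytes),
                Scalar::from_bytes_mod_order_wide(&wide)
            );
        }
    }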
1531
1532    #[test]
1533    fn non_adjacent_form_test_vector() {
1534        let naf = A_SCALAR.non_adjacent_form(5);
1535        for i in 0..256 {
1536            assert_eq!(naf[i], A_NAF[i]);
1537        }
1538    }
1539
1540    fn non_adjacent_form_iter(w: usize, x: &Scalar) {
1541        let naf = x.non_adjacent_form(w);
1542
1543        // Reconstruct the scalar from the computed NAF
1544        let mut y = Scalar::ZERO;
1545        for i in (0..256).rev() {
1546            y += y;
1547            let digit = if naf[i] < 0 {
1548                -Scalar::from((-naf[i]) as u64)
1549            } else {
1550                Scalar::from(naf[i] as u64)
1551            };
1552            y += digit;
1553        }
1554
1555        assert_eq!(*x, y);
1556    }
1557
1558    #[test]
1559    fn non_adjacent_form_random() {
1560        let mut rng = rand::thread_rng();
1561        for _ in 0..1_000 {
1562            let x = Scalar::random(&mut rng);
1563            for w in &[5, 6, 7, 8] {
1564                non_adjacent_form_iter(*w, &x);
1565            }
1566        }
1567    }
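
    // A hedged property sketch (not an upstream test vector): in a width-w NAF, every
    // nonzero digit is odd, is bounded by 2^(w-1) in absolute value, and is followed by
    // at least w-1 zero digits. We spot-check this for A_SCALAR at w = 5, consistent
    // with the A_NAF vector above.
    #[test]
    fn non_adjacent_form_digit_properties() {
        let w = 5;
        let naf = A_SCALAR.non_adjacent_form(w);
        let mut i = 0;
        while i < 256 {
            if naf[i] != 0 {
                // Nonzero digits are odd and bounded by 2^(w-1) = 16 in absolute value
                assert_eq!(naf[i] & 1, 1);
                assert!(naf[i] > -16 && naf[i] < 16);
                // The next w-1 digits must be zero
                for j in 1..w {
                    if i + j < 256 {
                        assert_eq!(naf[i + j], 0);
                    }
                }
                i += w;
            } else {
                i += 1;
            }
        }
    }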
1568
1569    #[test]
1570    fn from_u64() {
1571        let val: u64 = 0xdeadbeefdeadbeef;
1572        let s = Scalar::from(val);
1573        assert_eq!(s[7], 0xde);
1574        assert_eq!(s[6], 0xad);
1575        assert_eq!(s[5], 0xbe);
1576        assert_eq!(s[4], 0xef);
1577        assert_eq!(s[3], 0xde);
1578        assert_eq!(s[2], 0xad);
1579        assert_eq!(s[1], 0xbe);
1580        assert_eq!(s[0], 0xef);
1581    }
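
    // A hedged companion to `from_u64` above: the u128 conversion should lay bytes out
    // in the same little-endian order. This check is a sketch, not an upstream vector.
    #[test]
    fn from_u128() {
        let val: u128 = 0xdead_beef_dead_beef_dead_beef_dead_beef;
        let s = Scalar::from(val);
        // The low 16 bytes repeat the little-endian pattern ef, be, ad, de
        for i in 0..16 {
            let expected: u8 = [0xef, 0xbe, 0xad, 0xde][i % 4];
            assert_eq!(s[i], expected);
        }
        // Bytes beyond the 128-bit input are zero
        for i in 16..32 {
            assert_eq!(s[i], 0);
        }
    }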
1582
1583    #[test]
1584    fn scalar_mul_by_one() {
1585        let test_scalar = X * Scalar::ONE;
1586        for i in 0..32 {
1587            assert!(test_scalar[i] == X[i]);
1588        }
1589    }
1590
1591    #[test]
1592    fn add_reduces() {
1593        // Check that addition wraps around the modulus
1594        assert_eq!(BASEPOINT_ORDER_MINUS_ONE + Scalar::ONE, Scalar::ZERO);
1595    }
1596
1597    #[test]
1598    fn sub_reduces() {
1599        // Check that subtraction wraps around the modulus
1600        assert_eq!(Scalar::ZERO - Scalar::ONE, BASEPOINT_ORDER_MINUS_ONE);
1601    }
1602
1603    #[test]
1604    fn impl_add() {
1605        let two = Scalar::from(2u64);
1606        let one = Scalar::ONE;
1607        let should_be_two = one + one;
1608        assert_eq!(should_be_two, two);
1609    }
1610
1611    #[allow(non_snake_case)]
1612    #[test]
1613    fn impl_mul() {
1614        let should_be_X_times_Y = X * Y;
1615        assert_eq!(should_be_X_times_Y, X_TIMES_Y);
1616    }
1617
1618    #[allow(non_snake_case)]
1619    #[test]
1620    #[cfg(feature = "alloc")]
1621    fn impl_product() {
1622        // Test that product works for non-empty iterators
1623        let X_Y_vector = [X, Y];
1624        let should_be_X_times_Y: Scalar = X_Y_vector.iter().product();
1625        assert_eq!(should_be_X_times_Y, X_TIMES_Y);
1626
1627        // Test that product works for the empty iterator
1628        let one = Scalar::ONE;
1629        let empty_vector = [];
1630        let should_be_one: Scalar = empty_vector.iter().product();
1631        assert_eq!(should_be_one, one);
1632
1633        // Test that product works for iterators where Item = Scalar
1634        let xs = [Scalar::from(2u64); 10];
1635        let ys = [Scalar::from(3u64); 10];
1636        // now zs is an iterator with Item = Scalar
1637        let zs = xs.iter().zip(ys.iter()).map(|(x, y)| x * y);
1638
1639        let x_prod: Scalar = xs.iter().product();
1640        let y_prod: Scalar = ys.iter().product();
1641        let z_prod: Scalar = zs.product();
1642
1643        assert_eq!(x_prod, Scalar::from(1024u64));
1644        assert_eq!(y_prod, Scalar::from(59049u64));
1645        assert_eq!(z_prod, Scalar::from(60466176u64));
1646        assert_eq!(x_prod * y_prod, z_prod);
1647    }
1648
1649    #[test]
1650    #[cfg(feature = "alloc")]
1651    fn impl_sum() {
1652        // Test that sum works for non-empty iterators
1653        let two = Scalar::from(2u64);
1654        let one_vector = [Scalar::ONE, Scalar::ONE];
1655        let should_be_two: Scalar = one_vector.iter().sum();
1656        assert_eq!(should_be_two, two);
1657
1658        // Test that sum works for the empty iterator
1659        let zero = Scalar::ZERO;
1660        let empty_vector = [];
1661        let should_be_zero: Scalar = empty_vector.iter().sum();
1662        assert_eq!(should_be_zero, zero);
1663
1664        // Test that sum works for owned types
1665        let xs = [Scalar::from(1u64); 10];
1666        let ys = [Scalar::from(2u64); 10];
1667        // now zs is an iterator with Item = Scalar
1668        let zs = xs.iter().zip(ys.iter()).map(|(x, y)| x + y);
1669
1670        let x_sum: Scalar = xs.iter().sum();
1671        let y_sum: Scalar = ys.iter().sum();
1672        let z_sum: Scalar = zs.sum();
1673
1674        assert_eq!(x_sum, Scalar::from(10u64));
1675        assert_eq!(y_sum, Scalar::from(20u64));
1676        assert_eq!(z_sum, Scalar::from(30u64));
1677        assert_eq!(x_sum + y_sum, z_sum);
1678    }
1679
1680    #[test]
1681    fn square() {
1682        let expected = X * X;
1683        let actual = X.unpack().square().pack();
1684        for i in 0..32 {
1685            assert!(expected[i] == actual[i]);
1686        }
1687    }
1688
1689    #[test]
1690    fn reduce() {
1691        let biggest = Scalar::from_bytes_mod_order([0xff; 32]);
1692        assert_eq!(biggest, CANONICAL_2_256_MINUS_1);
1693    }
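
    // Reducing an already-canonical scalar should be a no-op; a small idempotence
    // sketch to accompany the `reduce` test above.
    #[test]
    fn reduce_idempotent() {
        assert_eq!(CANONICAL_2_256_MINUS_1.reduce(), CANONICAL_2_256_MINUS_1);
        assert_eq!(Scalar::ZERO.reduce(), Scalar::ZERO);
    }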
1694
1695    #[test]
1696    fn from_bytes_mod_order_wide() {
1697        let mut bignum = [0u8; 64];
1698        // set bignum = x + 2^256x
1699        for i in 0..32 {
1700            bignum[i] = X[i];
1701            bignum[32 + i] = X[i];
1702        }
1703        // 3958878930004874126169954872055634648693766179881526445624823978500314864344
1704        // = x + 2^256x (mod l)
1705        let reduced = Scalar {
1706            bytes: [
1707                216, 154, 179, 139, 210, 121, 2, 71, 69, 99, 158, 216, 23, 173, 63, 100, 204, 0,
1708                91, 50, 219, 153, 57, 249, 28, 82, 31, 197, 100, 165, 192, 8,
1709            ],
1710        };
1711        let test_red = Scalar::from_bytes_mod_order_wide(&bignum);
1712        for i in 0..32 {
1713            assert!(test_red[i] == reduced[i]);
1714        }
1715    }
1716
1717    #[allow(non_snake_case)]
1718    #[test]
1719    fn invert() {
1720        let inv_X = X.invert();
1721        assert_eq!(inv_X, XINV);
1722        let should_be_one = inv_X * X;
1723        assert_eq!(should_be_one, Scalar::ONE);
1724    }
1725
1726    // Negating a scalar twice should result in the original scalar.
1727    #[allow(non_snake_case)]
1728    #[test]
1729    fn neg_twice_is_identity() {
1730        let negative_X = -&X;
1731        let should_be_X = -&negative_X;
1732
1733        assert_eq!(should_be_X, X);
1734    }
1735
1736    #[test]
1737    fn to_bytes_from_bytes_roundtrips() {
1738        let unpacked = X.unpack();
1739        let bytes = unpacked.to_bytes();
1740        let should_be_unpacked = UnpackedScalar::from_bytes(&bytes);
1741
1742        assert_eq!(should_be_unpacked.0, unpacked.0);
1743    }
1744
1745    #[test]
1746    fn montgomery_reduce_matches_from_bytes_mod_order_wide() {
1747        let mut bignum = [0u8; 64];
1748
1749        // set bignum = x + 2^256x
1750        for i in 0..32 {
1751            bignum[i] = X[i];
1752            bignum[32 + i] = X[i];
1753        }
1754        // x + 2^256x (mod l)
1755        //         = 3958878930004874126169954872055634648693766179881526445624823978500314864344
1756        let expected = Scalar {
1757            bytes: [
1758                216, 154, 179, 139, 210, 121, 2, 71, 69, 99, 158, 216, 23, 173, 63, 100, 204, 0,
1759                91, 50, 219, 153, 57, 249, 28, 82, 31, 197, 100, 165, 192, 8,
1760            ],
1761        };
1762        let reduced = Scalar::from_bytes_mod_order_wide(&bignum);
1763
1764        // The reduced scalar should match the expected
1765        assert_eq!(reduced.bytes, expected.bytes);
1766
1767        //  (x + 2^256x) * R
1768        let interim =
1769            UnpackedScalar::mul_internal(&UnpackedScalar::from_bytes_wide(&bignum), &constants::R);
1770        // ((x + 2^256x) * R) / R  (mod l)
1771        let montgomery_reduced = UnpackedScalar::montgomery_reduce(&interim);
1772
1773        // The Montgomery reduced scalar should match the reduced one, as well as the expected
1774        assert_eq!(montgomery_reduced.0, reduced.unpack().0);
1775        assert_eq!(montgomery_reduced.0, expected.unpack().0)
1776    }
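
    // A hedged round-trip sketch: inversion works by converting into Montgomery form and
    // back, so that conversion should be the identity on a reduced scalar. This assumes
    // the backend helpers `as_montgomery`/`from_montgomery`, which `UnpackedScalar`'s
    // inversion path uses internally.
    #[test]
    fn montgomery_form_roundtrip() {
        let unpacked = X.unpack();
        // (x * R) / R == x  (mod l)
        let roundtripped = unpacked.as_montgomery().from_montgomery();
        assert_eq!(unpacked.0, roundtripped.0);
    }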
1777
1778    #[test]
1779    fn canonical_decoding() {
1780        // canonical encoding of 1667457891
1781        let canonical_bytes = [
1782            99, 99, 99, 99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1783            0, 0, 0, 0,
1784        ];
1785
1786        // encoding of
1787        //   7265385991361016183439748078976496179028704920197054998554201349516117938192
1788        // = 28380414028753969466561515933501938171588560817147392552250411230663687203 (mod l)
        // non-canonical because it is not reduced mod l
1790        let non_canonical_bytes_because_unreduced = [16; 32];
1791
1792        // encoding with high bit set, to check that the parser isn't pre-masking the high bit
1793        let non_canonical_bytes_because_highbit = [
1794            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1795            0, 0, 128,
1796        ];
1797
1798        assert!(bool::from(
1799            Scalar::from_canonical_bytes(canonical_bytes).is_some()
1800        ));
1801        assert!(bool::from(
1802            Scalar::from_canonical_bytes(non_canonical_bytes_because_unreduced).is_none()
1803        ));
1804        assert!(bool::from(
1805            Scalar::from_canonical_bytes(non_canonical_bytes_because_highbit).is_none()
1806        ));
1807    }
1808
1809    #[test]
1810    #[cfg(feature = "serde")]
1811    fn serde_bincode_scalar_roundtrip() {
1812        use bincode;
1813        let encoded = bincode::serialize(&X).unwrap();
1814        let parsed: Scalar = bincode::deserialize(&encoded).unwrap();
1815        assert_eq!(parsed, X);
1816
1817        // Check that the encoding is 32 bytes exactly
1818        assert_eq!(encoded.len(), 32);
1819
1820        // Check that the encoding itself matches the usual one
        assert_eq!(X, bincode::deserialize(X.as_bytes()).unwrap());
1822    }
1823
1824    #[cfg(all(debug_assertions, feature = "alloc"))]
1825    #[test]
1826    #[should_panic]
1827    fn batch_invert_with_a_zero_input_panics() {
1828        let mut xs = vec![Scalar::ONE; 16];
1829        xs[3] = Scalar::ZERO;
1830        // This should panic in debug mode.
1831        Scalar::batch_invert(&mut xs);
1832    }
1833
1834    #[test]
1835    #[cfg(feature = "alloc")]
1836    fn batch_invert_empty() {
1837        assert_eq!(Scalar::ONE, Scalar::batch_invert(&mut []));
1838    }
1839
1840    #[test]
1841    #[cfg(feature = "alloc")]
1842    fn batch_invert_consistency() {
1843        let mut x = Scalar::from(1u64);
1844        let mut v1: Vec<_> = (0..16)
1845            .map(|_| {
1846                let tmp = x;
1847                x = x + x;
1848                tmp
1849            })
1850            .collect();
1851        let v2 = v1.clone();
1852
1853        let expected: Scalar = v1.iter().product();
1854        let expected = expected.invert();
1855        let ret = Scalar::batch_invert(&mut v1);
1856        assert_eq!(ret, expected);
1857
1858        for (a, b) in v1.iter().zip(v2.iter()) {
1859            assert_eq!(a * b, Scalar::ONE);
1860        }
1861    }
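
    // A minimal sketch of Montgomery's trick, the technique `batch_invert` is built on:
    // form prefix products, pay for a single inversion of the total product, then unwind
    // it to recover each individual inverse with two multiplications per element. The
    // test body is illustrative, not an upstream vector.
    #[test]
    #[cfg(feature = "alloc")]
    fn batch_invert_matches_montgomery_trick() {
        let inputs = [X, Y, X_TIMES_Y];

        // prefix[i] = inputs[0] * ... * inputs[i]
        let mut prefix = Vec::with_capacity(inputs.len());
        let mut acc = Scalar::ONE;
        for &x in inputs.iter() {
            acc *= x;
            prefix.push(acc);
        }

        // One inversion for the whole batch
        let mut inv = acc.invert();

        // Unwind: peel one factor off the inverted total at each step
        let mut inverses = [Scalar::ZERO; 3];
        for i in (0..inputs.len()).rev() {
            inverses[i] = if i == 0 { inv } else { inv * prefix[i - 1] };
            inv *= inputs[i];
        }

        for (x, x_inv) in inputs.iter().zip(inverses.iter()) {
            assert_eq!(x * x_inv, Scalar::ONE);
        }
    }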
1862
1863    #[cfg(feature = "precomputed-tables")]
1864    fn test_pippenger_radix_iter(scalar: Scalar, w: usize) {
1865        let digits_count = Scalar::to_radix_2w_size_hint(w);
1866        let digits = scalar.as_radix_2w(w);
1867
1868        let radix = Scalar::from((1 << w) as u64);
1869        let mut term = Scalar::ONE;
1870        let mut recovered_scalar = Scalar::ZERO;
1871        for digit in &digits[0..digits_count] {
1872            let digit = *digit;
1873            if digit != 0 {
1874                let sdigit = if digit < 0 {
1875                    -Scalar::from((-(digit as i64)) as u64)
1876                } else {
1877                    Scalar::from(digit as u64)
1878                };
1879                recovered_scalar += term * sdigit;
1880            }
1881            term *= radix;
1882        }
1883        // When the input is unreduced, we may only recover the scalar mod l.
1884        assert_eq!(recovered_scalar, scalar.reduce());
1885    }
1886
1887    #[test]
1888    #[cfg(feature = "precomputed-tables")]
1889    fn test_pippenger_radix() {
1890        use core::iter;
        // For each valid radix, check that a batch of scalars (the inverses of 2..100,
        // which behave like random scalars, plus one unreduced edge case) can be
        // recovered exactly from the produced radix-2^w representation.
1893        let cases = (2..100)
1894            .map(|s| Scalar::from(s as u64).invert())
            // The largest unreduced scalar, s = 2^255 - 1. It is not reduced mod l, but
            // scalar multiplication still works on it.
1897            .chain(iter::once(LARGEST_UNREDUCED_SCALAR));
1898
1899        for scalar in cases {
1900            test_pippenger_radix_iter(scalar, 6);
1901            test_pippenger_radix_iter(scalar, 7);
1902            test_pippenger_radix_iter(scalar, 8);
1903        }
1904    }
1905
1906    #[test]
1907    #[cfg(feature = "alloc")]
1908    fn test_read_le_u64_into() {
1909        let cases: &[(&[u8], &[u64])] = &[
1910            (
1911                &[0xFE, 0xEF, 0x10, 0x01, 0x1F, 0xF1, 0x0F, 0xF0],
1912                &[0xF00F_F11F_0110_EFFE],
1913            ),
1914            (
1915                &[
1916                    0xFE, 0xEF, 0x10, 0x01, 0x1F, 0xF1, 0x0F, 0xF0, 0x12, 0x34, 0x56, 0x78, 0x9A,
1917                    0xBC, 0xDE, 0xF0,
1918                ],
1919                &[0xF00F_F11F_0110_EFFE, 0xF0DE_BC9A_7856_3412],
1920            ),
1921        ];
1922
1923        for (src, expected) in cases {
1924            let mut dst = vec![0; expected.len()];
1925            read_le_u64_into(src, &mut dst);
1926
1927            assert_eq!(&dst, expected, "Expected {:x?} got {:x?}", expected, dst);
1928        }
1929    }
1930
1931    // Tests consistency of From<{integer}> impls for Scalar
1932    #[test]
1933    fn test_scalar_from_int() {
1934        let s1 = Scalar::ONE;
1935
1936        // For `x` in `u8`, `u16`, `u32`, `u64`, and `u128`, check that
1937        // `Scalar::from(x + 1) == Scalar::from(x) + Scalar::from(1)`
1938
1939        let x = 0x23u8;
1940        let sx = Scalar::from(x);
1941        assert_eq!(sx + s1, Scalar::from(x + 1));
1942
1943        let x = 0x2323u16;
1944        let sx = Scalar::from(x);
1945        assert_eq!(sx + s1, Scalar::from(x + 1));
1946
1947        let x = 0x2323_2323u32;
1948        let sx = Scalar::from(x);
1949        assert_eq!(sx + s1, Scalar::from(x + 1));
1950
1951        let x = 0x2323_2323_2323_2323u64;
1952        let sx = Scalar::from(x);
1953        assert_eq!(sx + s1, Scalar::from(x + 1));
1954
1955        let x = 0x2323_2323_2323_2323_2323_2323_2323_2323u128;
1956        let sx = Scalar::from(x);
1957        assert_eq!(sx + s1, Scalar::from(x + 1));
1958    }
1959
1960    #[cfg(feature = "group")]
1961    #[test]
1962    fn ff_constants() {
1963        assert_eq!(Scalar::from(2u64) * Scalar::TWO_INV, Scalar::ONE);
1964
1965        assert_eq!(
1966            Scalar::ROOT_OF_UNITY * Scalar::ROOT_OF_UNITY_INV,
1967            Scalar::ONE,
1968        );
1969
1970        // ROOT_OF_UNITY^{2^s} mod m == 1
1971        assert_eq!(
1972            Scalar::ROOT_OF_UNITY.pow(&[1u64 << Scalar::S, 0, 0, 0]),
1973            Scalar::ONE,
1974        );
1975
1976        // DELTA^{t} mod m == 1
1977        assert_eq!(
1978            Scalar::DELTA.pow(&[
1979                0x9604_98c6_973d_74fb,
1980                0x0537_be77_a8bd_e735,
1981                0x0000_0000_0000_0000,
1982                0x0400_0000_0000_0000,
1983            ]),
1984            Scalar::ONE,
1985        );
1986    }
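
    // A hedged follow-up to the constants above: since ROOT_OF_UNITY generates the
    // order-2^S subgroup, raising it to 2^(S-1) must give the unique element of order
    // two, i.e., -1. This mirrors the `pow` call shape used above; the test is a
    // sketch, not an upstream vector.
    #[cfg(feature = "group")]
    #[test]
    fn ff_root_of_unity_order() {
        assert_eq!(
            Scalar::ROOT_OF_UNITY.pow(&[1u64 << (Scalar::S - 1), 0, 0, 0]),
            -Scalar::ONE,
        );
    }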
1987
1988    #[cfg(feature = "group")]
1989    #[test]
1990    fn ff_impls() {
1991        assert!(bool::from(Scalar::ZERO.is_even()));
1992        assert!(bool::from(Scalar::ONE.is_odd()));
1993        assert!(bool::from(Scalar::from(2u64).is_even()));
1994        assert!(bool::from(Scalar::DELTA.is_even()));
1995
1996        assert!(bool::from(Field::invert(&Scalar::ZERO).is_none()));
1997        assert_eq!(Field::invert(&X).unwrap(), XINV);
1998
1999        let x_sq = X.square();
2000        // We should get back either the positive or negative root.
2001        assert!([X, -X].contains(&x_sq.sqrt().unwrap()));
2002
2003        assert_eq!(Scalar::from_repr_vartime(X.to_repr()), Some(X));
2004        assert_eq!(Scalar::from_repr_vartime([0xff; 32]), None);
2005
2006        assert_eq!(Scalar::from_repr(X.to_repr()).unwrap(), X);
2007        assert!(bool::from(Scalar::from_repr([0xff; 32]).is_none()));
2008    }
2009
2010    #[test]
2011    #[should_panic]
2012    fn test_read_le_u64_into_should_panic_on_bad_input() {
2013        let mut dst = [0_u64; 1];
2014        // One byte short
2015        read_le_u64_into(&[0xFE, 0xEF, 0x10, 0x01, 0x1F, 0xF1, 0x0F], &mut dst);
2016    }
2017
2018    #[test]
2019    fn test_scalar_clamp() {
2020        let input = A_SCALAR.bytes;
2021        let expected = [
2022            0x18, 0x0e, 0x97, 0x8a, 0x90, 0xf6, 0x62, 0x2d, 0x37, 0x47, 0x02, 0x3f, 0x8a, 0xd8,
2023            0x26, 0x4d, 0xa7, 0x58, 0xaa, 0x1b, 0x88, 0xe0, 0x40, 0xd1, 0x58, 0x9e, 0x7b, 0x7f,
2024            0x23, 0x76, 0xef, 0x49,
2025        ];
2026        let actual = clamp_integer(input);
2027        assert_eq!(actual, expected);
2028
2029        let expected = [
2030            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2031            0, 0, 0x40,
2032        ];
2033        let actual = clamp_integer([0; 32]);
2034        assert_eq!(expected, actual);
2035        let expected = [
2036            0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
2037            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
2038            0xff, 0xff, 0xff, 0x7f,
2039        ];
2040        let actual = clamp_integer([0xff; 32]);
2041        assert_eq!(actual, expected);
2042
2043        assert_eq!(
2044            LARGEST_CLAMPED_INTEGER,
2045            clamp_integer(LARGEST_CLAMPED_INTEGER)
2046        );
2047    }
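
    // A companion sketch re-deriving the clamping bit-twiddles by hand (the expected
    // vectors above confirm them): clamping clears the low three bits, clears bit 255,
    // and sets bit 254, as in RFC 7748.
    #[test]
    fn test_scalar_clamp_bit_pattern() {
        let mut rng = rand::thread_rng();
        let mut bytes = [0u8; 32];
        rng.fill_bytes(&mut bytes);

        let mut expected = bytes;
        expected[0] &= 0b1111_1000; // clear the low 3 bits
        expected[31] &= 0b0111_1111; // clear bit 255
        expected[31] |= 0b0100_0000; // set bit 254

        assert_eq!(clamp_integer(bytes), expected);
    }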
2048
    // Check that a * b == a.reduce() * b.reduce() for ANY scalars a, b, even ones that violate
    // invariant #1, i.e., a, b > 2^255. Old versions of ed25519-dalek did multiplication where a
    // was reduced and b was clamped and unreduced. This checks that such a multiplication was
    // always well-defined.
2052    #[test]
2053    fn test_mul_reduction_invariance() {
2054        let mut rng = rand::thread_rng();
2055
2056        for _ in 0..10 {
2057            // Also define c that's clamped. We'll make sure that clamping doesn't affect
2058            // computation
2059            let (a, b, c) = {
2060                let mut a_bytes = [0u8; 32];
2061                let mut b_bytes = [0u8; 32];
2062                let mut c_bytes = [0u8; 32];
2063                rng.fill_bytes(&mut a_bytes);
2064                rng.fill_bytes(&mut b_bytes);
2065                rng.fill_bytes(&mut c_bytes);
2066                (
2067                    Scalar { bytes: a_bytes },
2068                    Scalar { bytes: b_bytes },
2069                    Scalar {
2070                        bytes: clamp_integer(c_bytes),
2071                    },
2072                )
2073            };
2074
2075            // Make sure this is the same product no matter how you cut it
2076            let reduced_mul_ab = a.reduce() * b.reduce();
2077            let reduced_mul_ac = a.reduce() * c.reduce();
2078            assert_eq!(a * b, reduced_mul_ab);
2079            assert_eq!(a.reduce() * b, reduced_mul_ab);
2080            assert_eq!(a * b.reduce(), reduced_mul_ab);
2081            assert_eq!(a * c, reduced_mul_ac);
2082            assert_eq!(a.reduce() * c, reduced_mul_ac);
2083            assert_eq!(a * c.reduce(), reduced_mul_ac);
2084        }
2085    }
2086}