curve25519_dalek/
scalar.rs

1// -*- mode: rust; -*-
2//
3// This file is part of curve25519-dalek.
4// Copyright (c) 2016-2021 isis lovecruft
5// Copyright (c) 2016-2019 Henry de Valence
6// Portions Copyright 2017 Brian Smith
7// See LICENSE for licensing information.
8//
9// Authors:
10// - Isis Agora Lovecruft <isis@patternsinthevoid.net>
11// - Henry de Valence <hdevalence@hdevalence.ca>
12// - Brian Smith <brian@briansmith.org>
13
14//! Arithmetic on scalars (integers mod the group order).
15//!
16//! Both the Ristretto group and the Ed25519 basepoint have prime order
17//! \\( \ell = 2\^{252} + 27742317777372353535851937790883648493 \\).
18//!
19//! This code is intended to be useful with both the Ristretto group
20//! (where everything is done modulo \\( \ell \\)), and the X/Ed25519
21//! setting, which mandates specific bit-twiddles that are not
22//! well-defined modulo \\( \ell \\).
23//!
24//! All arithmetic on `Scalars` is done modulo \\( \ell \\).
25//!
26//! # Constructing a scalar
27//!
28//! To create a [`Scalar`](struct.Scalar.html) from a supposedly canonical encoding, use
29//! [`Scalar::from_canonical_bytes`](struct.Scalar.html#method.from_canonical_bytes).
30//!
31//! This function does input validation, ensuring that the input bytes
32//! are the canonical encoding of a `Scalar`.
33//! If they are, we'll get
34//! `Some(Scalar)` in return:
35//!
36//! ```
37//! use curve25519_dalek::scalar::Scalar;
38//!
39//! let one_as_bytes: [u8; 32] = Scalar::ONE.to_bytes();
40//! let a: Option<Scalar> = Scalar::from_canonical_bytes(one_as_bytes).into();
41//!
42//! assert!(a.is_some());
43//! ```
44//!
45//! However, if we give it bytes representing a scalar larger than \\( \ell \\)
46//! (in this case, \\( \ell + 2 \\)), we'll get `None` back:
47//!
48//! ```
49//! use curve25519_dalek::scalar::Scalar;
50//!
51//! let l_plus_two_bytes: [u8; 32] = [
52//!    0xef, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58,
53//!    0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14,
54//!    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
55//!    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
56//! ];
57//! let a: Option<Scalar> = Scalar::from_canonical_bytes(l_plus_two_bytes).into();
58//!
59//! assert!(a.is_none());
60//! ```
61//!
62//! Another way to create a `Scalar` is by reducing a \\(256\\)-bit integer mod
63//! \\( \ell \\), for which one may use the
64//! [`Scalar::from_bytes_mod_order`](struct.Scalar.html#method.from_bytes_mod_order)
65//! method.  In the case of the second example above, this would reduce the
66//! resultant scalar \\( \mod \ell \\), producing \\( 2 \\):
67//!
68//! ```
69//! use curve25519_dalek::scalar::Scalar;
70//!
71//! let l_plus_two_bytes: [u8; 32] = [
72//!    0xef, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58,
73//!    0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14,
74//!    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
75//!    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
76//! ];
77//! let a: Scalar = Scalar::from_bytes_mod_order(l_plus_two_bytes);
78//!
79//! let two: Scalar = Scalar::ONE + Scalar::ONE;
80//!
81//! assert!(a == two);
82//! ```
83//!
84//! There is also a constructor that reduces a \\(512\\)-bit integer,
85//! [`Scalar::from_bytes_mod_order_wide`].
86//!
87//! To construct a `Scalar` as the hash of some input data, use
88//! [`Scalar::hash_from_bytes`], which takes a buffer, or
89//! [`Scalar::from_hash`], which allows an IUF API.
90//!
91#![cfg_attr(feature = "digest", doc = "```")]
92#![cfg_attr(not(feature = "digest"), doc = "```ignore")]
93//! # fn main() {
94//! use sha2::{Digest, Sha512};
95//! use curve25519_dalek::scalar::Scalar;
96//!
97//! // Hashing a single byte slice
98//! let a = Scalar::hash_from_bytes::<Sha512>(b"Abolish ICE");
99//!
100//! // Streaming data into a hash object
101//! let mut hasher = Sha512::default();
102//! hasher.update(b"Abolish ");
103//! hasher.update(b"ICE");
104//! let a2 = Scalar::from_hash(hasher);
105//!
106//! assert_eq!(a, a2);
107//! # }
108//! ```
109//!
//! See also `Scalar::hash_from_bytes` and `Scalar::from_hash`, which
//! reduce a \\(512\\)-bit integer, if the optional `digest` feature
//! has been enabled.
113
114use core::borrow::Borrow;
115use core::fmt::Debug;
116use core::iter::{Product, Sum};
117use core::ops::Index;
118use core::ops::Neg;
119use core::ops::{Add, AddAssign};
120use core::ops::{Mul, MulAssign};
121use core::ops::{Sub, SubAssign};
122
123use cfg_if::cfg_if;
124
125#[cfg(feature = "group")]
126use group::ff::{Field, FromUniformBytes, PrimeField};
127#[cfg(feature = "group-bits")]
128use group::ff::{FieldBits, PrimeFieldBits};
129
130#[cfg(any(test, feature = "group"))]
131use rand_core::RngCore;
132
133#[cfg(any(test, feature = "rand_core"))]
134use rand_core::CryptoRngCore;
135
136#[cfg(feature = "digest")]
137use digest::generic_array::typenum::U64;
138#[cfg(feature = "digest")]
139use digest::Digest;
140
141use subtle::Choice;
142use subtle::ConditionallySelectable;
143use subtle::ConstantTimeEq;
144use subtle::CtOption;
145
146#[cfg(feature = "zeroize")]
147use zeroize::Zeroize;
148
149use crate::backend;
150use crate::constants;
151
152cfg_if! {
153    if #[cfg(curve25519_dalek_backend = "fiat")] {
154        /// An `UnpackedScalar` represents an element of the field GF(l), optimized for speed.
155        ///
156        /// This is a type alias for one of the scalar types in the `backend`
157        /// module.
158        #[cfg(curve25519_dalek_bits = "32")]
159        #[cfg_attr(
160            docsrs,
161            doc(cfg(all(feature = "fiat_backend", curve25519_dalek_bits = "32")))
162        )]
163        type UnpackedScalar = backend::serial::fiat_u32::scalar::Scalar29;
164
165        /// An `UnpackedScalar` represents an element of the field GF(l), optimized for speed.
166        ///
167        /// This is a type alias for one of the scalar types in the `backend`
168        /// module.
169        #[cfg(curve25519_dalek_bits = "64")]
170        #[cfg_attr(
171            docsrs,
172            doc(cfg(all(feature = "fiat_backend", curve25519_dalek_bits = "64")))
173        )]
174        type UnpackedScalar = backend::serial::fiat_u64::scalar::Scalar52;
175    } else if #[cfg(curve25519_dalek_bits = "64")] {
176        /// An `UnpackedScalar` represents an element of the field GF(l), optimized for speed.
177        ///
178        /// This is a type alias for one of the scalar types in the `backend`
179        /// module.
180        #[cfg_attr(docsrs, doc(cfg(curve25519_dalek_bits = "64")))]
181        type UnpackedScalar = backend::serial::u64::scalar::Scalar52;
182    } else {
183        /// An `UnpackedScalar` represents an element of the field GF(l), optimized for speed.
184        ///
185        /// This is a type alias for one of the scalar types in the `backend`
186        /// module.
187        #[cfg_attr(docsrs, doc(cfg(curve25519_dalek_bits = "64")))]
188        type UnpackedScalar = backend::serial::u32::scalar::Scalar29;
189    }
190}
191
/// The `Scalar` struct holds an element of \\(\mathbb Z / \ell\mathbb Z \\).
#[allow(clippy::derived_hash_with_manual_eq)]
#[derive(Copy, Clone, Hash)]
pub struct Scalar {
    /// `bytes` is a little-endian byte encoding of an integer representing a scalar modulo the
    /// group order.
    ///
    /// # Invariant #1
    ///
    /// The integer representing this scalar is less than \\(2\^{255}\\). That is, the most
    /// significant bit of `bytes[31]` is 0.
    ///
    /// This is required for `EdwardsPoint` variable- and fixed-base multiplication, because most
    /// integers above 2^255 are unrepresentable in our radix-16 NAF (see [`Self::as_radix_16`]).
    /// The invariant is also required because our `MontgomeryPoint` multiplication assumes the MSB
    /// is 0 (see `MontgomeryPoint::mul`).
    ///
    /// # Invariant #2 (weak)
    ///
    /// The integer representing this scalar is less than the group order \\( \ell \\), i.e., it
    /// represents a canonical representative of an element of \\( \mathbb Z / \ell\mathbb Z \\).
    /// This is stronger than invariant #1. It also sometimes has to be broken.
    ///
    /// This invariant is deliberately broken in the implementation of `EdwardsPoint::{mul_clamped,
    /// mul_base_clamped}`, `MontgomeryPoint::{mul_clamped, mul_base_clamped}`, and
    /// `BasepointTable::mul_base_clamped`. This is not an issue though. As mentioned above,
    /// scalar-point multiplication is defined for any choice of `bytes` that satisfies invariant
    /// #1. Since clamping guarantees invariant #1 is satisfied, these operations are well defined.
    ///
    /// Note: Scalar-point mult is the _only_ thing you can do safely with an unreduced scalar.
    /// Scalar-scalar addition and subtraction are NOT correct when using unreduced scalars.
    /// Multiplication is correct, but this is only due to a quirk of our implementation, and not
    /// guaranteed to hold in general in the future.
    ///
    /// Note: It is not possible to construct an unreduced `Scalar` from the public API unless the
    /// `legacy_compatibility` feature is enabled (thus making `Scalar::from_bits` public). Thus,
    /// for all public non-legacy uses, invariant #2
    /// always holds.
    ///
    pub(crate) bytes: [u8; 32],
}
233
impl Scalar {
    /// Construct a `Scalar` by reducing a 256-bit little-endian integer
    /// modulo the group order \\( \ell \\).
    pub fn from_bytes_mod_order(bytes: [u8; 32]) -> Scalar {
        // Temporarily allow s_unreduced.bytes > 2^255 ...
        let s_unreduced = Scalar { bytes };

        // Then reduce mod the group order and return the reduced representative.
        let s = s_unreduced.reduce();
        // A reduced scalar is < ℓ < 2^253, so its top bit must be clear (invariant #1).
        debug_assert_eq!(0u8, s[31] >> 7);

        s
    }

    /// Construct a `Scalar` by reducing a 512-bit little-endian integer
    /// modulo the group order \\( \ell \\).
    pub fn from_bytes_mod_order_wide(input: &[u8; 64]) -> Scalar {
        // The wide reduction is done on the unpacked (limb) representation, then re-packed.
        UnpackedScalar::from_bytes_wide(input).pack()
    }

    /// Attempt to construct a `Scalar` from a canonical byte representation.
    ///
    /// # Return
    ///
    /// - `Some(s)`, where `s` is the `Scalar` corresponding to `bytes`,
    ///   if `bytes` is a canonical byte representation modulo the group order \\( \ell \\);
    /// - `None` if `bytes` is not a canonical byte representation.
    pub fn from_canonical_bytes(bytes: [u8; 32]) -> CtOption<Scalar> {
        // Both checks are performed in constant time; the result is carried in a
        // `CtOption` rather than branching on secret data.
        let high_bit_unset = (bytes[31] >> 7).ct_eq(&0);
        let candidate = Scalar { bytes };
        CtOption::new(candidate, high_bit_unset & candidate.is_canonical())
    }

    /// Construct a `Scalar` from the low 255 bits of a 256-bit integer. This breaks the invariant
    /// that scalars are always reduced. Scalar-scalar arithmetic, i.e., addition, subtraction,
    /// multiplication, **does not work** on scalars produced from this function. You may only use
    /// the output of this function for `EdwardsPoint::mul`, `MontgomeryPoint::mul`, and
    /// `EdwardsPoint::vartime_double_scalar_mul_basepoint`. **Do not use this function** unless
    /// you absolutely have to.
    #[cfg(feature = "legacy_compatibility")]
    #[deprecated(
        since = "4.0.0",
        note = "This constructor outputs scalars with undefined scalar-scalar arithmetic. See docs."
    )]
    pub const fn from_bits(bytes: [u8; 32]) -> Scalar {
        let mut s = Scalar { bytes };
        // Ensure invariant #1 holds. That is, make s < 2^255 by masking the high bit.
        s.bytes[31] &= 0b0111_1111;

        s
    }
}
286
287impl Debug for Scalar {
288    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
289        write!(f, "Scalar{{\n\tbytes: {:?},\n}}", &self.bytes)
290    }
291}
292
293impl Eq for Scalar {}
294impl PartialEq for Scalar {
295    fn eq(&self, other: &Self) -> bool {
296        self.ct_eq(other).into()
297    }
298}
299
300impl ConstantTimeEq for Scalar {
301    fn ct_eq(&self, other: &Self) -> Choice {
302        self.bytes.ct_eq(&other.bytes)
303    }
304}
305
306impl Index<usize> for Scalar {
307    type Output = u8;
308
309    /// Index the bytes of the representative for this `Scalar`.  Mutation is not permitted.
310    fn index(&self, _index: usize) -> &u8 {
311        &(self.bytes[_index])
312    }
313}
314
315impl<'a> MulAssign<&'a Scalar> for Scalar {
316    fn mul_assign(&mut self, _rhs: &'a Scalar) {
317        *self = UnpackedScalar::mul(&self.unpack(), &_rhs.unpack()).pack();
318    }
319}
320
321define_mul_assign_variants!(LHS = Scalar, RHS = Scalar);
322
323impl<'a> Mul<&'a Scalar> for &Scalar {
324    type Output = Scalar;
325    fn mul(self, _rhs: &'a Scalar) -> Scalar {
326        UnpackedScalar::mul(&self.unpack(), &_rhs.unpack()).pack()
327    }
328}
329
330define_mul_variants!(LHS = Scalar, RHS = Scalar, Output = Scalar);
331
332impl<'a> AddAssign<&'a Scalar> for Scalar {
333    fn add_assign(&mut self, _rhs: &'a Scalar) {
334        *self = *self + _rhs;
335    }
336}
337
338define_add_assign_variants!(LHS = Scalar, RHS = Scalar);
339
340impl<'a> Add<&'a Scalar> for &Scalar {
341    type Output = Scalar;
342    #[allow(non_snake_case)]
343    fn add(self, _rhs: &'a Scalar) -> Scalar {
344        // The UnpackedScalar::add function produces reduced outputs if the inputs are reduced. By
345        // Scalar invariant #1, this is always the case.
346        UnpackedScalar::add(&self.unpack(), &_rhs.unpack()).pack()
347    }
348}
349
350define_add_variants!(LHS = Scalar, RHS = Scalar, Output = Scalar);
351
352impl<'a> SubAssign<&'a Scalar> for Scalar {
353    fn sub_assign(&mut self, _rhs: &'a Scalar) {
354        *self = *self - _rhs;
355    }
356}
357
358define_sub_assign_variants!(LHS = Scalar, RHS = Scalar);
359
360impl<'a> Sub<&'a Scalar> for &Scalar {
361    type Output = Scalar;
362    #[allow(non_snake_case)]
363    fn sub(self, rhs: &'a Scalar) -> Scalar {
364        // The UnpackedScalar::sub function produces reduced outputs if the inputs are reduced. By
365        // Scalar invariant #1, this is always the case.
366        UnpackedScalar::sub(&self.unpack(), &rhs.unpack()).pack()
367    }
368}
369
370define_sub_variants!(LHS = Scalar, RHS = Scalar, Output = Scalar);
371
impl Neg for &Scalar {
    type Output = Scalar;
    #[allow(non_snake_case)]
    fn neg(self) -> Scalar {
        // Multiply by the Montgomery constant R and then Montgomery-reduce; the two steps
        // cancel (x·R / R = x mod ℓ), so `self_mod_l` is `self` fully reduced mod ℓ even if
        // the input did not satisfy invariant #2.
        let self_R = UnpackedScalar::mul_internal(&self.unpack(), &constants::R);
        let self_mod_l = UnpackedScalar::montgomery_reduce(&self_R);
        // -x is computed as (0 - x) mod ℓ on the reduced representative.
        UnpackedScalar::sub(&UnpackedScalar::ZERO, &self_mod_l).pack()
    }
}
381
382impl Neg for Scalar {
383    type Output = Scalar;
384    fn neg(self) -> Scalar {
385        -&self
386    }
387}
388
389impl ConditionallySelectable for Scalar {
390    fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
391        let mut bytes = [0u8; 32];
392        #[allow(clippy::needless_range_loop)]
393        for i in 0..32 {
394            bytes[i] = u8::conditional_select(&a.bytes[i], &b.bytes[i], choice);
395        }
396        Scalar { bytes }
397    }
398}
399
400#[cfg(feature = "serde")]
401use serde::de::Visitor;
402#[cfg(feature = "serde")]
403use serde::{Deserialize, Deserializer, Serialize, Serializer};
404
#[cfg(feature = "serde")]
#[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
impl Serialize for Scalar {
    /// Serialize the scalar as a fixed-length tuple of 32 bytes.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        use serde::ser::SerializeTuple;
        let mut tup = serializer.serialize_tuple(32)?;
        for byte in &self.bytes {
            tup.serialize_element(byte)?;
        }
        tup.end()
    }
}
420
#[cfg(feature = "serde")]
#[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
impl<'de> Deserialize<'de> for Scalar {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        /// Visitor that accepts exactly 32 bytes and enforces canonical encoding.
        struct ScalarVisitor;

        impl<'de> Visitor<'de> for ScalarVisitor {
            type Value = Scalar;

            fn expecting(&self, formatter: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
                formatter.write_str(
                    "a sequence of 32 bytes whose little-endian interpretation is less than the \
                    basepoint order ℓ",
                )
            }

            fn visit_seq<A>(self, mut seq: A) -> Result<Scalar, A::Error>
            where
                A: serde::de::SeqAccess<'de>,
            {
                let mut bytes = [0u8; 32];
                for (i, slot) in bytes.iter_mut().enumerate() {
                    *slot = seq
                        .next_element()?
                        .ok_or_else(|| serde::de::Error::invalid_length(i, &"expected 32 bytes"))?;
                }
                // Reject any encoding that is not the canonical representative mod ℓ.
                let maybe_scalar: Option<Scalar> = Scalar::from_canonical_bytes(bytes).into();
                maybe_scalar
                    .ok_or_else(|| serde::de::Error::custom("scalar was not canonically encoded"))
            }
        }

        deserializer.deserialize_tuple(32, ScalarVisitor)
    }
}
459
460impl<T> Product<T> for Scalar
461where
462    T: Borrow<Scalar>,
463{
464    fn product<I>(iter: I) -> Self
465    where
466        I: Iterator<Item = T>,
467    {
468        iter.fold(Scalar::ONE, |acc, item| acc * item.borrow())
469    }
470}
471
472impl<T> Sum<T> for Scalar
473where
474    T: Borrow<Scalar>,
475{
476    fn sum<I>(iter: I) -> Self
477    where
478        I: Iterator<Item = T>,
479    {
480        iter.fold(Scalar::ZERO, |acc, item| acc + item.borrow())
481    }
482}
483
484impl Default for Scalar {
485    fn default() -> Scalar {
486        Scalar::ZERO
487    }
488}
489
490impl From<u8> for Scalar {
491    fn from(x: u8) -> Scalar {
492        let mut s_bytes = [0u8; 32];
493        s_bytes[0] = x;
494        Scalar { bytes: s_bytes }
495    }
496}
497
498impl From<u16> for Scalar {
499    fn from(x: u16) -> Scalar {
500        let mut s_bytes = [0u8; 32];
501        let x_bytes = x.to_le_bytes();
502        s_bytes[0..x_bytes.len()].copy_from_slice(&x_bytes);
503        Scalar { bytes: s_bytes }
504    }
505}
506
507impl From<u32> for Scalar {
508    fn from(x: u32) -> Scalar {
509        let mut s_bytes = [0u8; 32];
510        let x_bytes = x.to_le_bytes();
511        s_bytes[0..x_bytes.len()].copy_from_slice(&x_bytes);
512        Scalar { bytes: s_bytes }
513    }
514}
515
516impl From<u64> for Scalar {
517    /// Construct a scalar from the given `u64`.
518    ///
519    /// # Inputs
520    ///
521    /// An `u64` to convert to a `Scalar`.
522    ///
523    /// # Returns
524    ///
525    /// A `Scalar` corresponding to the input `u64`.
526    ///
527    /// # Example
528    ///
529    /// ```
530    /// use curve25519_dalek::scalar::Scalar;
531    ///
532    /// let fourtytwo = Scalar::from(42u64);
533    /// let six = Scalar::from(6u64);
534    /// let seven = Scalar::from(7u64);
535    ///
536    /// assert!(fourtytwo == six * seven);
537    /// ```
538    fn from(x: u64) -> Scalar {
539        let mut s_bytes = [0u8; 32];
540        let x_bytes = x.to_le_bytes();
541        s_bytes[0..x_bytes.len()].copy_from_slice(&x_bytes);
542        Scalar { bytes: s_bytes }
543    }
544}
545
546impl From<u128> for Scalar {
547    fn from(x: u128) -> Scalar {
548        let mut s_bytes = [0u8; 32];
549        let x_bytes = x.to_le_bytes();
550        s_bytes[0..x_bytes.len()].copy_from_slice(&x_bytes);
551        Scalar { bytes: s_bytes }
552    }
553}
554
#[cfg(feature = "zeroize")]
impl Zeroize for Scalar {
    /// Overwrite the byte encoding with zeros.
    fn zeroize(&mut self) {
        Zeroize::zeroize(&mut self.bytes);
    }
}
561
562impl Scalar {
    /// The scalar \\( 0 \\).
    pub const ZERO: Self = Self { bytes: [0u8; 32] };

    /// The scalar \\( 1 \\).
    // Little-endian encoding: the value 1 lives in the first byte.
    pub const ONE: Self = Self {
        bytes: [
            1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ],
    };
573
    #[cfg(any(test, feature = "rand_core"))]
    /// Return a `Scalar` chosen uniformly at random using a user-provided RNG.
    ///
    /// # Inputs
    ///
    /// * `rng`: any RNG which implements `CryptoRngCore`
    ///   (i.e. `CryptoRng` + `RngCore`) interface.
    ///
    /// # Returns
    ///
    /// A random scalar within \\(\mathbb{Z} / \ell\mathbb{Z}\\).
    ///
    /// # Example
    ///
    /// ```
    /// # fn main() {
    /// use curve25519_dalek::scalar::Scalar;
    ///
    /// use rand_core::OsRng;
    ///
    /// let mut csprng = OsRng;
    /// let a: Scalar = Scalar::random(&mut csprng);
    /// # }
    /// ```
    pub fn random<R: CryptoRngCore + ?Sized>(rng: &mut R) -> Self {
        // Sample 512 uniform bits and reduce mod ℓ; the wide reduction keeps the output
        // distribution uniform (reducing only 256 bits would introduce a bias).
        let mut scalar_bytes = [0u8; 64];
        rng.fill_bytes(&mut scalar_bytes);
        Scalar::from_bytes_mod_order_wide(&scalar_bytes)
    }
602
603    #[cfg(feature = "digest")]
604    /// Hash a slice of bytes into a scalar.
605    ///
606    /// Takes a type parameter `D`, which is any `Digest` producing 64
607    /// bytes (512 bits) of output.
608    ///
609    /// Convenience wrapper around `from_hash`.
610    ///
611    /// # Example
612    ///
613    #[cfg_attr(feature = "digest", doc = "```")]
614    #[cfg_attr(not(feature = "digest"), doc = "```ignore")]
615    /// # use curve25519_dalek::scalar::Scalar;
616    /// use sha2::Sha512;
617    ///
618    /// # // Need fn main() here in comment so the doctest compiles
619    /// # // See https://doc.rust-lang.org/book/documentation.html#documentation-as-tests
620    /// # fn main() {
621    /// let msg = "To really appreciate architecture, you may even need to commit a murder";
622    /// let s = Scalar::hash_from_bytes::<Sha512>(msg.as_bytes());
623    /// # }
624    /// ```
625    pub fn hash_from_bytes<D>(input: &[u8]) -> Scalar
626    where
627        D: Digest<OutputSize = U64> + Default,
628    {
629        let mut hash = D::default();
630        hash.update(input);
631        Scalar::from_hash(hash)
632    }
633
634    #[cfg(feature = "digest")]
635    /// Construct a scalar from an existing `Digest` instance.
636    ///
637    /// Use this instead of `hash_from_bytes` if it is more convenient
638    /// to stream data into the `Digest` than to pass a single byte
639    /// slice.
640    ///
641    /// # Example
642    ///
643    /// ```
644    /// # use curve25519_dalek::scalar::Scalar;
645    /// use curve25519_dalek::digest::Update;
646    ///
647    /// use sha2::Digest;
648    /// use sha2::Sha512;
649    ///
650    /// # fn main() {
651    /// let mut h = Sha512::new()
652    ///     .chain("To really appreciate architecture, you may even need to commit a murder.")
653    ///     .chain("While the programs used for The Manhattan Transcripts are of the most extreme")
654    ///     .chain("nature, they also parallel the most common formula plot: the archetype of")
655    ///     .chain("murder. Other phantasms were occasionally used to underline the fact that")
656    ///     .chain("perhaps all architecture, rather than being about functional standards, is")
657    ///     .chain("about love and death.");
658    ///
659    /// let s = Scalar::from_hash(h);
660    ///
661    /// println!("{:?}", s.to_bytes());
662    /// assert_eq!(
663    ///     s.to_bytes(),
664    ///     [  21,  88, 208, 252,  63, 122, 210, 152,
665    ///       154,  38,  15,  23,  16, 167,  80, 150,
666    ///       192, 221,  77, 226,  62,  25, 224, 148,
667    ///       239,  48, 176,  10, 185,  69, 168,  11, ],
668    /// );
669    /// # }
670    /// ```
671    pub fn from_hash<D>(hash: D) -> Scalar
672    where
673        D: Digest<OutputSize = U64>,
674    {
675        let mut output = [0u8; 64];
676        output.copy_from_slice(hash.finalize().as_slice());
677        Scalar::from_bytes_mod_order_wide(&output)
678    }
679
    /// Convert this `Scalar` to its underlying sequence of bytes.
    ///
    /// Returns a copy of the little-endian encoding; see [`Self::as_bytes`] for a
    /// borrowed view.
    ///
    /// # Example
    ///
    /// ```
    /// use curve25519_dalek::scalar::Scalar;
    ///
    /// let s: Scalar = Scalar::ZERO;
    ///
    /// assert!(s.to_bytes() == [0u8; 32]);
    /// ```
    pub const fn to_bytes(&self) -> [u8; 32] {
        self.bytes
    }
694
    /// View the little-endian byte encoding of the integer representing this Scalar.
    ///
    /// Borrows the encoding without copying; see [`Self::to_bytes`] for an owned copy.
    ///
    /// # Example
    ///
    /// ```
    /// use curve25519_dalek::scalar::Scalar;
    ///
    /// let s: Scalar = Scalar::ZERO;
    ///
    /// assert!(s.as_bytes() == &[0u8; 32]);
    /// ```
    pub const fn as_bytes(&self) -> &[u8; 32] {
        &self.bytes
    }
709
710    /// Given a nonzero `Scalar`, compute its multiplicative inverse.
711    ///
712    /// # Warning
713    ///
714    /// `self` **MUST** be nonzero.  If you cannot
715    /// *prove* that this is the case, you **SHOULD NOT USE THIS
716    /// FUNCTION**.
717    ///
718    /// # Returns
719    ///
720    /// The multiplicative inverse of the this `Scalar`.
721    ///
722    /// # Example
723    ///
724    /// ```
725    /// use curve25519_dalek::scalar::Scalar;
726    ///
727    /// // x = 2238329342913194256032495932344128051776374960164957527413114840482143558222
728    /// let X: Scalar = Scalar::from_bytes_mod_order([
729    ///         0x4e, 0x5a, 0xb4, 0x34, 0x5d, 0x47, 0x08, 0x84,
730    ///         0x59, 0x13, 0xb4, 0x64, 0x1b, 0xc2, 0x7d, 0x52,
731    ///         0x52, 0xa5, 0x85, 0x10, 0x1b, 0xcc, 0x42, 0x44,
732    ///         0xd4, 0x49, 0xf4, 0xa8, 0x79, 0xd9, 0xf2, 0x04,
733    ///     ]);
734    /// // 1/x = 6859937278830797291664592131120606308688036382723378951768035303146619657244
735    /// let XINV: Scalar = Scalar::from_bytes_mod_order([
736    ///         0x1c, 0xdc, 0x17, 0xfc, 0xe0, 0xe9, 0xa5, 0xbb,
737    ///         0xd9, 0x24, 0x7e, 0x56, 0xbb, 0x01, 0x63, 0x47,
738    ///         0xbb, 0xba, 0x31, 0xed, 0xd5, 0xa9, 0xbb, 0x96,
739    ///         0xd5, 0x0b, 0xcd, 0x7a, 0x3f, 0x96, 0x2a, 0x0f,
740    ///     ]);
741    ///
742    /// let inv_X: Scalar = X.invert();
743    /// assert!(XINV == inv_X);
744    /// let should_be_one: Scalar = &inv_X * &X;
745    /// assert!(should_be_one == Scalar::ONE);
746    /// ```
747    pub fn invert(&self) -> Scalar {
748        self.unpack().invert().pack()
749    }
750
    /// Given a slice of nonzero (possibly secret) `Scalar`s,
    /// compute their inverses in a batch.
    ///
    /// # Return
    ///
    /// Each element of `inputs` is replaced by its inverse.
    ///
    /// The product of all inverses is returned.
    ///
    /// # Warning
    ///
    /// All input `Scalars` **MUST** be nonzero.  If you cannot
    /// *prove* that this is the case, you **SHOULD NOT USE THIS
    /// FUNCTION**.
    ///
    /// # Example
    ///
    /// ```
    /// # use curve25519_dalek::scalar::Scalar;
    /// # fn main() {
    /// let mut scalars = [
    ///     Scalar::from(3u64),
    ///     Scalar::from(5u64),
    ///     Scalar::from(7u64),
    ///     Scalar::from(11u64),
    /// ];
    ///
    /// let allinv = Scalar::batch_invert(&mut scalars);
    ///
    /// assert_eq!(allinv, Scalar::from(3*5*7*11u64).invert());
    /// assert_eq!(scalars[0], Scalar::from(3u64).invert());
    /// assert_eq!(scalars[1], Scalar::from(5u64).invert());
    /// assert_eq!(scalars[2], Scalar::from(7u64).invert());
    /// assert_eq!(scalars[3], Scalar::from(11u64).invert());
    /// # }
    /// ```
    #[cfg(feature = "alloc")]
    pub fn batch_invert(inputs: &mut [Scalar]) -> Scalar {
        // This code is essentially identical to the FieldElement
        // implementation, and is documented there.  Unfortunately,
        // it's not easy to write it generically, since here we want
        // to use `UnpackedScalar`s internally, and `Scalar`s
        // externally, but there's no corresponding distinction for
        // field elements.
        //
        // This is Montgomery's batch-inversion trick: one field inversion plus
        // O(n) multiplications instead of n inversions.

        let n = inputs.len();
        let one: UnpackedScalar = Scalar::ONE.unpack().as_montgomery();

        let mut scratch = vec![one; n];

        // Keep an accumulator of all of the previous products
        let mut acc = Scalar::ONE.unpack().as_montgomery();

        // Pass through the input vector, recording the previous
        // products in the scratch space
        // (using while loop to avoid iter_mut().zip() which pulls in Iterator traits)
        let mut i = 0;
        while i < n {
            let input = &mut inputs[i];
            // scratch[i] holds the product of inputs[0..i] (in Montgomery form).
            scratch[i] = acc;

            // Avoid unnecessary Montgomery multiplication in second pass by
            // keeping inputs in Montgomery form
            // NOTE: this temporarily stores a Montgomery-form value in a packed
            // `Scalar`; the second pass below restores proper values.
            let tmp = input.unpack().as_montgomery();
            *input = tmp.pack();
            acc = UnpackedScalar::montgomery_mul(&acc, &tmp);
            i += 1;
        }

        // acc is nonzero iff all inputs are nonzero
        debug_assert!(acc.pack() != Scalar::ZERO);

        // Compute the inverse of all products
        acc = acc.montgomery_invert().from_montgomery();

        // We need to return the product of all inverses later
        let ret = acc.pack();

        // Pass through the vector backwards to compute the inverses
        // in place (using while loop to avoid .rev() which causes Charon trait mismatch)
        // At the top of iteration i, acc = (inputs[0] * ... * inputs[i])^-1 and
        // scratch[i] = inputs[0] * ... * inputs[i-1], so their product is inputs[i]^-1.
        let mut i = n;
        while i > 0 {
            i -= 1;
            let input = &mut inputs[i];
            let scratch_val = &scratch[i];
            let tmp = UnpackedScalar::montgomery_mul(&acc, &input.unpack());
            *input = UnpackedScalar::montgomery_mul(&acc, scratch_val).pack();
            acc = tmp;
        }

        // Wipe the scratch space, which held (products of) secret inputs.
        #[cfg(feature = "zeroize")]
        Zeroize::zeroize(&mut scratch);

        ret
    }
846
847    /// Get the bits of the scalar, in little-endian order
848    #[cfg(not(verify))]
849    #[allow(dead_code)]
850    pub(crate) fn bits_le(&self) -> impl DoubleEndedIterator<Item = bool> + '_ {
851        (0..256).map(|i| {
852            // As i runs from 0..256, the bottom 3 bits index the bit, while the upper bits index
853            // the byte. Since self.bytes is little-endian at the byte level, this iterator is
854            // little-endian on the bit level
855            ((self.bytes[i >> 3] >> (i & 7)) & 1u8) == 1
856        })
857    }
858
    /// Compute a width-\\(w\\) "Non-Adjacent Form" of this scalar.
    ///
    /// A width-\\(w\\) NAF of a positive integer \\(k\\) is an expression
    /// $$
    /// k = \sum_{i=0}\^m n\_i 2\^i,
    /// $$
    /// where each nonzero
    /// coefficient \\(n\_i\\) is odd and bounded by \\(|n\_i| < 2\^{w-1}\\),
    /// \\(n\_{m-1}\\) is nonzero, and at most one of any \\(w\\) consecutive
    /// coefficients is nonzero.  (Hankerson, Menezes, Vanstone; def 3.32).
    ///
    /// The length of the NAF is at most one more than the length of
    /// the binary representation of \\(k\\).  This is why the
    /// `Scalar` type maintains an invariant (invariant #1) that the top bit is
    /// \\(0\\), so that the NAF of a scalar has at most 256 digits.
    ///
    /// Intuitively, this is like a binary expansion, except that we
    /// allow some coefficients to grow in magnitude up to
    /// \\(2\^{w-1}\\) so that the nonzero coefficients are as sparse
    /// as possible.
    ///
    /// When doing scalar multiplication, we can then use a lookup
    /// table of precomputed multiples of a point to add the nonzero
    /// terms \\( k_i P \\).  Using signed digits cuts the table size
    /// in half, and using odd digits cuts the table size in half
    /// again.
    ///
    /// To compute a \\(w\\)-NAF, we use a modification of Algorithm 3.35 of HMV:
    ///
    /// 1. \\( i \gets 0 \\)
    /// 2. While \\( k \ge 1 \\):
    ///     1. If \\(k\\) is odd, \\( n_i \gets k \operatorname{mods} 2^w \\), \\( k \gets k - n_i \\).
    ///     2. If \\(k\\) is even, \\( n_i \gets 0 \\).
    ///     3. \\( k \gets k / 2 \\), \\( i \gets i + 1 \\).
    /// 3. Return \\( n_0, n_1, ... , \\)
    ///
    /// Here \\( \bar x = x \operatorname{mods} 2^w \\) means the
    /// \\( \bar x \\) with \\( \bar x \equiv x \pmod{2^w} \\) and
    /// \\( -2^{w-1} \leq \bar x < 2^{w-1} \\).
    ///
    /// We implement this by scanning across the bits of \\(k\\) from
    /// least-significant bit to most-significant-bit.
    /// Write the bits of \\(k\\) as
    /// $$
    /// k = \sum\_{i=0}\^m k\_i 2^i,
    /// $$
    /// and split the sum as
    /// $$
    /// k = \sum\_{i=0}^{w-1} k\_i 2^i + 2^w \sum\_{i=0} k\_{i+w} 2^i
    /// $$
    /// where the first part is \\( k \mod 2^w \\).
    ///
    /// If \\( k \mod 2^w\\) is odd, and \\( k \mod 2^w < 2^{w-1} \\), then we emit
    /// \\( n_0 = k \mod 2^w \\).  Instead of computing
    /// \\( k - n_0 \\), we just advance \\(w\\) bits and reindex.
    ///
    /// If \\( k \mod 2^w\\) is odd, and \\( k \mod 2^w \ge 2^{w-1} \\), then
    /// \\( n_0 = k \operatorname{mods} 2^w = k \mod 2^w - 2^w \\).
    /// The quantity \\( k - n_0 \\) is
    /// $$
    /// \begin{aligned}
    /// k - n_0 &= \sum\_{i=0}^{w-1} k\_i 2^i + 2^w \sum\_{i=0} k\_{i+w} 2^i
    ///          - \sum\_{i=0}^{w-1} k\_i 2^i + 2^w \\\\
    /// &= 2^w + 2^w \sum\_{i=0} k\_{i+w} 2^i
    /// \end{aligned}
    /// $$
    /// so instead of computing the subtraction, we can set a carry
    /// bit, advance \\(w\\) bits, and reindex.
    ///
    /// If \\( k \mod 2^w\\) is even, we emit \\(0\\), advance 1 bit
    /// and reindex.  In fact, by setting all digits to \\(0\\)
    /// initially, we don't need to emit anything.
    pub(crate) fn non_adjacent_form(&self, w: usize) -> [i8; 256] {
        // required by the NAF definition
        debug_assert!(w >= 2);
        // required so that the NAF digits fit in i8
        debug_assert!(w <= 8);

        let mut naf = [0i8; 256];

        // Five limbs, not four: the fifth limb stays zero so that the window
        // read below may index `x_u64[1 + u64_idx]` with `u64_idx == 3`
        // (i.e. near bit 255) without going out of bounds.
        let mut x_u64 = [0u64; 5];
        read_le_u64_into(&self.bytes, &mut x_u64[0..4]);

        let width = 1 << w;
        let window_mask = width - 1;

        let mut pos = 0;
        let mut carry = 0;
        while pos < 256 {
            // Construct a buffer of bits of the scalar, starting at bit `pos`
            let u64_idx = pos / 64;
            let bit_idx = pos % 64;
            let bit_buf: u64 = if bit_idx < 64 - w {
                // This window's bits are contained in a single u64
                x_u64[u64_idx] >> bit_idx
            } else {
                // Combine the current u64's bits with the bits from the next u64
                (x_u64[u64_idx] >> bit_idx) | (x_u64[1 + u64_idx] << (64 - bit_idx))
            };

            // Add the carry into the current window
            let window = carry + (bit_buf & window_mask);

            if window & 1 == 0 {
                // If the window value is even, preserve the carry and continue.
                // Why is the carry preserved?
                // If carry == 0 and window & 1 == 0, then the next carry should be 0
                // If carry == 1 and window & 1 == 0, then bit_buf & 1 == 1 so the next carry should be 1
                pos += 1;
                continue;
            }

            if window < width / 2 {
                // Window fits in [1, 2^(w-1)): emit it directly, no borrow.
                carry = 0;
                naf[pos] = window as i8;
            } else {
                // Window is in [2^(w-1), 2^w): emit window - 2^w (negative)
                // and propagate the borrowed 2^w as a carry into the next window.
                carry = 1;
                naf[pos] = (window as i8).wrapping_sub(width as i8);
            }

            pos += w;
        }

        naf
    }
984
    /// Write this scalar in radix 16, with coefficients in \\([-8,8)\\),
    /// i.e., compute \\(a\_i\\) such that
    /// $$
    ///    a = a\_0 + a\_1 16\^1 + \cdots + a_{63} 16\^{63},
    /// $$
    /// with \\(-8 \leq a_i < 8\\) for \\(0 \leq i < 63\\) and \\(-8 \leq a_{63} \leq 8\\).
    ///
    /// The largest value that can be decomposed like this is just over \\(2^{255}\\). Thus, in
    /// order to not error, the top bit MUST NOT be set, i.e., `Self` MUST be less than
    /// \\(2^{255}\\).
    ///
    /// NOTE(review): the manual `while` loops (rather than `for`/iterators)
    /// appear deliberate, matching the Aeneas translation constraints noted
    /// elsewhere in this file, so they are intentionally left as-is.
    pub(crate) fn as_radix_16(&self) -> [i8; 64] {
        // Scalar invariant #1: top bit clear, so the value is < 2^255.
        debug_assert!(self[31] <= 127);
        let mut output = [0i8; 64];

        // Step 1: change radix.
        // Convert from radix 256 (bytes) to radix 16 (nibbles)
        #[allow(clippy::identity_op)]
        #[inline(always)]
        fn bot_half(x: u8) -> u8 {
            (x >> 0) & 15
        }
        #[inline(always)]
        fn top_half(x: u8) -> u8 {
            (x >> 4) & 15
        }

        let mut i = 0;
        while i < 32 {
            output[2 * i] = bot_half(self[i]) as i8;
            output[2 * i + 1] = top_half(self[i]) as i8;
            i += 1;
        }
        // Precondition note: since self[31] <= 127, output[63] <= 7

        // Step 2: recenter coefficients from [0,16) to [-8,8)
        let mut i = 0;
        while i < 63 {
            // carry is 1 exactly when output[i] >= 8; subtracting 16 here and
            // adding 1 to the next digit preserves the represented value.
            let carry = (output[i] + 8) >> 4;
            output[i] -= carry << 4;
            output[i + 1] += carry;
            i += 1;
        }
        // Precondition note: output[63] is not recentered.  It
        // increases by carry <= 1.  Thus output[63] <= 8.

        output
    }
1032
    /// Returns a size hint indicating how many entries of the return
    /// value of `to_radix_2w` are nonzero.
    ///
    /// For \\(4 \le w \le 7\\) this is \\(\lceil 256 / w \rceil\\); for
    /// \\(w = 8\\) one extra digit is reserved for the terminal carry (see
    /// the carry-folding comment at the end of `as_radix_2w`).
    #[cfg(any(feature = "alloc", all(test, feature = "precomputed-tables")))]
    pub(crate) fn to_radix_2w_size_hint(w: usize) -> usize {
        debug_assert!(w >= 4);
        debug_assert!(w <= 8);

        // TODO: upstream uses `match w { 4..=7 => ..., 8 => ... }` but Aeneas
        // translates usize match arms as scalar patterns which fail Lean's
        // dependent elimination for Usize (see FIXME in
        // aeneas/backends/lean/Aeneas/Std/Scalar/Notations.lean:131-142).
        // Using if/else avoids the issue.
        let digits_count = if w <= 7 {
            // ceil(256 / w)
            (256 + w - 1) / w
        } else if w == 8 {
            // See comment in to_radix_2w on handling the terminal carry.
            (256 + w - 1) / w + 1_usize
        } else {
            // Unreachable given the debug_asserts above; keeps the if/else total.
            panic!("invalid radix parameter")
        };

        debug_assert!(digits_count <= 64);
        digits_count
    }
1057
    /// Creates a representation of a Scalar in radix \\( 2^w \\) with \\(w = 4, 5, 6, 7, 8\\) for
    /// use with the Pippenger algorithm. Higher radixes are not supported to save cache space.
    /// Radix 256 is near-optimal even for very large inputs.
    ///
    /// Radix below 16 or above 256 is prohibited.
    /// This method returns digits in a fixed-sized array, excess digits are zeroes.
    ///
    /// For radix 16, `Self` must be less than \\(2^{255}\\). This is because most integers larger
    /// than \\(2^{255}\\) are unrepresentable in the form described below for \\(w = 4\\). This
    /// would be true for \\(w = 8\\) as well, but it is compensated for by increasing the size
    /// hint by 1.
    ///
    /// ## Scalar representation
    ///
    /// Radix \\(2\^w\\), with \\(n = ceil(256/w)\\) coefficients in \\([-(2\^w)/2,(2\^w)/2)\\),
    /// i.e., scalar is represented using digits \\(a\_i\\) such that
    /// $$
    ///    a = a\_0 + a\_1 2\^1w + \cdots + a_{n-1} 2\^{w*(n-1)},
    /// $$
    /// with \\(-2\^w/2 \leq a_i < 2\^w/2\\) for \\(0 \leq i < (n-1)\\) and \\(-2\^w/2 \leq a_{n-1} \leq 2\^w/2\\).
    ///
    #[cfg(any(feature = "alloc", feature = "precomputed-tables"))]
    pub(crate) fn as_radix_2w(&self, w: usize) -> [i8; 64] {
        debug_assert!(w >= 4);
        debug_assert!(w <= 8);

        if w == 4 {
            // Radix 16 has a dedicated implementation.
            return self.as_radix_16();
        }

        // Scalar formatted as four `u64`s with carry bit packed into the highest bit.
        let mut scalar64x4 = [0u64; 4];
        read_le_u64_into(&self.bytes, &mut scalar64x4[0..4]);

        let radix: u64 = 1 << w;
        let window_mask: u64 = radix - 1;

        let mut carry = 0u64;
        let mut digits = [0i8; 64];
        let digits_count = (256 + w - 1) / w;
        #[allow(clippy::needless_range_loop)]
        for i in 0..digits_count {
            // Construct a buffer of bits of the scalar, starting at `bit_offset`.
            let bit_offset = i * w;
            let u64_idx = bit_offset / 64;
            let bit_idx = bit_offset % 64;

            // Read the bits from the scalar
            let bit_buf: u64 = if bit_idx < 64 - w || u64_idx == 3 {
                // This window's bits are contained in a single u64,
                // or it's the last u64 anyway.
                scalar64x4[u64_idx] >> bit_idx
            } else {
                // Combine the current u64's bits with the bits from the next u64
                (scalar64x4[u64_idx] >> bit_idx) | (scalar64x4[1 + u64_idx] << (64 - bit_idx))
            };

            // Read the actual coefficient value from the window
            let coef = carry + (bit_buf & window_mask); // coef = [0, 2^r)

            // Recenter coefficients from [0,2^w) to [-2^w/2, 2^w/2):
            // carry is 1 exactly when coef >= 2^w/2, in which case 2^w is
            // subtracted here and re-added into the next digit.
            carry = (coef + (radix / 2)) >> w;
            digits[i] = ((coef as i64) - (carry << w) as i64) as i8;
        }

        // When 4 < w < 8, we can fold the final carry onto the last digit d,
        // because d < 2^w/2 so d + carry*2^w = d + 1*2^w < 2^(w+1) < 2^8.
        //
        // When w = 8, we can't fit carry*2^w into an i8.  This should
        // not happen anyways, because the final carry will be 0 for
        // reduced scalars, but Scalar invariant #1 allows 255-bit scalars.
        // To handle this, we expand the size_hint by 1 when w=8,
        // and accumulate the final carry onto another digit.
        // TODO: upstream uses `match w { 8 => ..., _ => ... }` — see
        // TODO comment in to_radix_2w_size_hint for why we use if/else.
        if w == 8 {
            digits[digits_count] += carry as i8;
        } else {
            digits[digits_count - 1] += (carry << w) as i8;
        }

        digits
    }
1141
    /// Unpack this `Scalar` to an `UnpackedScalar` for faster arithmetic.
    ///
    /// Pure representation change (delegates to `UnpackedScalar::from_bytes`);
    /// the numeric value is unchanged.
    pub(crate) fn unpack(&self) -> UnpackedScalar {
        UnpackedScalar::from_bytes(&self.bytes)
    }
1146
    /// Reduce this `Scalar` modulo \\(\ell\\).
    ///
    /// Montgomery reduction divides its input by the Montgomery constant
    /// \\(R\\) (mod \\(\ell\\)), so first multiplying by \\(R\\) and then
    /// Montgomery-reducing yields exactly \\(x \bmod \ell\\).
    #[allow(non_snake_case)]
    fn reduce(&self) -> Scalar {
        let x = self.unpack();
        // xR = x * R, a double-width product.
        let xR = UnpackedScalar::mul_internal(&x, &constants::R);
        // montgomery_reduce(x * R) = x * R / R = x (mod l).
        let x_mod_l = UnpackedScalar::montgomery_reduce(&xR);
        x_mod_l.pack()
    }
1155
1156    /// Check whether this `Scalar` is the canonical representative mod \\(\ell\\). This is not
1157    /// public because any `Scalar` that is publicly observed is reduced, by scalar invariant #2.
1158    fn is_canonical(&self) -> Choice {
1159        self.ct_eq(&self.reduce())
1160    }
1161}
1162
impl UnpackedScalar {
    /// Pack the limbs of this `UnpackedScalar` into a `Scalar`.
    fn pack(&self) -> Scalar {
        Scalar {
            bytes: self.to_bytes(),
        }
    }

    /// Inverts an UnpackedScalar in Montgomery form.
    ///
    /// Computes `self`^(\\(\ell - 2\\)) via a fixed addition chain, which is
    /// the inverse mod \\(\ell\\) by Fermat's little theorem.
    #[rustfmt::skip] // keep alignment of addition chain and squarings
    #[allow(clippy::just_underscores_and_digits)]
    pub fn montgomery_invert(&self) -> UnpackedScalar {
        // Uses the addition chain from
        // https://briansmith.org/ecc-inversion-addition-chains-01#curve25519_scalar_inversion
        // Each `_n` below is self^n, with n written in binary.
        let    _1 = *self;
        let   _10 = _1.montgomery_square();
        let  _100 = _10.montgomery_square();
        let   _11 = UnpackedScalar::montgomery_mul(&_10,     &_1);
        let  _101 = UnpackedScalar::montgomery_mul(&_10,    &_11);
        let  _111 = UnpackedScalar::montgomery_mul(&_10,   &_101);
        let _1001 = UnpackedScalar::montgomery_mul(&_10,   &_111);
        let _1011 = UnpackedScalar::montgomery_mul(&_10,  &_1001);
        let _1111 = UnpackedScalar::montgomery_mul(&_100, &_1011);

        // _10000
        let mut y = UnpackedScalar::montgomery_mul(&_1111, &_1);

        // Computes y <- y^(2^squarings) * x, all in Montgomery form.
        // (Manual while-loop style appears deliberate here; see the Aeneas
        // translation notes elsewhere in this file.)
        #[inline]
        fn square_multiply(y: &mut UnpackedScalar, squarings: usize, x: &UnpackedScalar) {
            let mut i = 0;
            while i <  squarings {
                *y = y.montgomery_square();
                i += 1;
            }
            *y = UnpackedScalar::montgomery_mul(y, x);
        }

        // The squaring counts and multipliers below follow the published
        // chain exactly; do not reorder.
        square_multiply(&mut y, 123 + 3, &_101);
        square_multiply(&mut y,   2 + 2, &_11);
        square_multiply(&mut y,   1 + 4, &_1111);
        square_multiply(&mut y,   1 + 4, &_1111);
        square_multiply(&mut y,       4, &_1001);
        square_multiply(&mut y,       2, &_11);
        square_multiply(&mut y,   1 + 4, &_1111);
        square_multiply(&mut y,   1 + 3, &_101);
        square_multiply(&mut y,   3 + 3, &_101);
        square_multiply(&mut y,       3, &_111);
        square_multiply(&mut y,   1 + 4, &_1111);
        square_multiply(&mut y,   2 + 3, &_111);
        square_multiply(&mut y,   2 + 2, &_11);
        square_multiply(&mut y,   1 + 4, &_1011);
        square_multiply(&mut y,   2 + 4, &_1011);
        square_multiply(&mut y,   6 + 4, &_1001);
        square_multiply(&mut y,   2 + 2, &_11);
        square_multiply(&mut y,   3 + 2, &_11);
        square_multiply(&mut y,   3 + 2, &_11);
        square_multiply(&mut y,   1 + 4, &_1001);
        square_multiply(&mut y,   1 + 3, &_111);
        square_multiply(&mut y,   2 + 4, &_1111);
        square_multiply(&mut y,   1 + 4, &_1011);
        square_multiply(&mut y,       3, &_101);
        square_multiply(&mut y,   2 + 4, &_1111);
        square_multiply(&mut y,       3, &_101);
        square_multiply(&mut y,   1 + 2, &_11);

        y
    }

    /// Inverts an UnpackedScalar not in Montgomery form.
    pub fn invert(&self) -> UnpackedScalar {
        // Convert into Montgomery form, invert there, convert back.
        self.as_montgomery().montgomery_invert().from_montgomery()
    }
}
1236
#[cfg(feature = "group")]
impl Field for Scalar {
    const ZERO: Self = Self::ZERO;
    const ONE: Self = Self::ONE;

    fn random(mut rng: impl RngCore) -> Self {
        // NOTE: this is duplicated due to different `rng` bounds
        // Sample 64 uniform bytes and reduce mod l (wide reduction).
        let mut scalar_bytes = [0u8; 64];
        rng.fill_bytes(&mut scalar_bytes);
        Self::from_bytes_mod_order_wide(&scalar_bytes)
    }

    fn square(&self) -> Self {
        self * self
    }

    fn double(&self) -> Self {
        self + self
    }

    fn invert(&self) -> CtOption<Self> {
        // Zero has no inverse; failure is signalled through the Choice flag
        // rather than a branch.
        CtOption::new(self.invert(), !self.is_zero())
    }

    fn sqrt_ratio(num: &Self, div: &Self) -> (Choice, Self) {
        #[allow(unused_qualifications)]
        group::ff::helpers::sqrt_ratio_generic(num, div)
    }

    fn sqrt(&self) -> CtOption<Self> {
        // NOTE(review): the limbs below are the exponent passed to the
        // generic Tonelli-Shanks helper — presumably ((t - 1) / 2) in
        // little-endian u64 limbs, where l - 1 = 2^S * t with t odd.
        // TODO: confirm against the `ff` helper documentation.
        #[allow(unused_qualifications)]
        group::ff::helpers::sqrt_tonelli_shanks(
            self,
            [
                0xcb02_4c63_4b9e_ba7d,
                0x029b_df3b_d45e_f39a,
                0x0000_0000_0000_0000,
                0x0200_0000_0000_0000,
            ],
        )
    }
}
1279
#[cfg(feature = "group")]
impl PrimeField for Scalar {
    type Repr = [u8; 32];

    fn from_repr(repr: Self::Repr) -> CtOption<Self> {
        // Canonical (reduced) decoding; rejects encodings >= l.
        Self::from_canonical_bytes(repr)
    }

    fn from_repr_vartime(repr: Self::Repr) -> Option<Self> {
        // Variable-time decoding, as the `_vartime` name indicates.
        // Check that the high bit is not set
        if (repr[31] >> 7) != 0u8 {
            return None;
        }

        let candidate = Scalar { bytes: repr };

        // Canonical iff reduction mod l is a no-op.
        if candidate == candidate.reduce() {
            Some(candidate)
        } else {
            None
        }
    }

    fn to_repr(&self) -> Self::Repr {
        self.to_bytes()
    }

    fn is_odd(&self) -> Choice {
        // Parity is the low bit of the little-endian encoding.
        Choice::from(self.as_bytes()[0] & 1)
    }

    // l = 2^252 + 27742317777372353535851937790883648493, written in hex.
    const MODULUS: &'static str =
        "0x1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed";
    const NUM_BITS: u32 = 253;
    const CAPACITY: u32 = 252;

    // NOTE(review): the constants below are little-endian byte encodings.
    // Presumably TWO_INV = 2^-1 mod l, ROOT_OF_UNITY is a primitive 2^S-th
    // root of unity, and DELTA = MULTIPLICATIVE_GENERATOR^(2^S), per the
    // `ff::PrimeField` contract — confirm against the ff crate docs.
    const TWO_INV: Self = Self {
        bytes: [
            0xf7, 0xe9, 0x7a, 0x2e, 0x8d, 0x31, 0x09, 0x2c, 0x6b, 0xce, 0x7b, 0x51, 0xef, 0x7c,
            0x6f, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x08,
        ],
    };
    const MULTIPLICATIVE_GENERATOR: Self = Self {
        bytes: [
            2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ],
    };
    const S: u32 = 2;
    const ROOT_OF_UNITY: Self = Self {
        bytes: [
            0xd4, 0x07, 0xbe, 0xeb, 0xdf, 0x75, 0x87, 0xbe, 0xfe, 0x83, 0xce, 0x42, 0x53, 0x56,
            0xf0, 0x0e, 0x7a, 0xc2, 0xc1, 0xab, 0x60, 0x6d, 0x3d, 0x7d, 0xe7, 0x81, 0x79, 0xe0,
            0x10, 0x73, 0x4a, 0x09,
        ],
    };
    const ROOT_OF_UNITY_INV: Self = Self {
        bytes: [
            0x19, 0xcc, 0x37, 0x71, 0x3a, 0xed, 0x8a, 0x99, 0xd7, 0x18, 0x29, 0x60, 0x8b, 0xa3,
            0xee, 0x05, 0x86, 0x3d, 0x3e, 0x54, 0x9f, 0x92, 0xc2, 0x82, 0x18, 0x7e, 0x86, 0x1f,
            0xef, 0x8c, 0xb5, 0x06,
        ],
    };
    const DELTA: Self = Self {
        bytes: [
            16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ],
    };
}
1351
#[cfg(feature = "group-bits")]
impl PrimeFieldBits for Scalar {
    type ReprBits = [u8; 32];

    fn to_le_bits(&self) -> FieldBits<Self::ReprBits> {
        // The canonical byte encoding is already little-endian.
        self.to_repr().into()
    }

    fn char_le_bits() -> FieldBits<Self::ReprBits> {
        // The field characteristic is the basepoint order l.
        constants::BASEPOINT_ORDER_PRIVATE.to_bytes().into()
    }
}
1364
#[cfg(feature = "group")]
impl FromUniformBytes<64> for Scalar {
    fn from_uniform_bytes(bytes: &[u8; 64]) -> Self {
        // Wide (64-byte) reduction mod l of uniform input.
        Scalar::from_bytes_mod_order_wide(bytes)
    }
}
1371
/// Read one or more u64s stored as little endian bytes.
///
/// ## Panics
/// Panics if `src.len() != 8 * dst.len()`.
fn read_le_u64_into(src: &[u8], dst: &mut [u64]) {
    assert!(src.len() == 8 * dst.len());
    // NOTE(review): the manual while-loop and per-byte array construction
    // (rather than `chunks_exact` + `try_into`) appear deliberate, matching
    // the Aeneas translation constraints noted elsewhere in this file.
    let mut i = 0;
    while i < dst.len() {
        // Each destination limb consumes 8 consecutive source bytes.
        let start = i * 8;
        dst[i] = u64::from_le_bytes([
            src[start],
            src[start + 1],
            src[start + 2],
            src[start + 3],
            src[start + 4],
            src[start + 5],
            src[start + 6],
            src[start + 7],
        ]);
        i += 1;
    }
}
1394
/// _Clamps_ the given little-endian representation of a 32-byte integer. Clamping the value puts
/// it in the range:
///
/// **n ∈ 2^254 + 8\*{0, 1, 2, 3, . . ., 2^251 − 1}**
///
/// # Explanation of clamping
///
/// For Curve25519, h = 8, and multiplying by 8 is the same as a binary left-shift by 3 bits.
/// If you take a secret scalar value between 2^251 and 2^252 – 1 and left-shift by 3 bits
/// then you end up with a 255-bit number with the most significant bit set to 1 and
/// the least-significant three bits set to 0.
///
/// The Curve25519 clamping operation takes **an arbitrary 256-bit random value** and
/// clears the most-significant bit (making it a 255-bit number), sets the next bit, and then
/// clears the 3 least-significant bits. In other words, it directly creates a scalar value that is
/// in the right form and pre-multiplied by the cofactor.
///
/// See [here](https://neilmadden.blog/2020/05/28/whats-the-curve25519-clamping-all-about/) for
/// more details.
#[must_use]
pub const fn clamp_integer(mut bytes: [u8; 32]) -> [u8; 32] {
    // Zero the three low bits, making the value a multiple of the cofactor 8.
    bytes[0] &= 0xf8;
    // In the top byte, clear bit 255 and set bit 254 in one step, so the
    // value lies in [2^254, 2^255).
    bytes[31] = (bytes[31] & 0x7f) | 0x40;
    bytes
}
1421
1422#[cfg(test)]
1423pub(crate) mod test {
1424    use super::*;
1425
1426    #[cfg(feature = "alloc")]
1427    use alloc::vec::Vec;
1428
    // All byte arrays below are little-endian encodings of the stated
    // integers (matching the encoding used by `Scalar::to_bytes`).

    /// x = 2238329342913194256032495932344128051776374960164957527413114840482143558222
    pub static X: Scalar = Scalar {
        bytes: [
            0x4e, 0x5a, 0xb4, 0x34, 0x5d, 0x47, 0x08, 0x84, 0x59, 0x13, 0xb4, 0x64, 0x1b, 0xc2,
            0x7d, 0x52, 0x52, 0xa5, 0x85, 0x10, 0x1b, 0xcc, 0x42, 0x44, 0xd4, 0x49, 0xf4, 0xa8,
            0x79, 0xd9, 0xf2, 0x04,
        ],
    };
    /// 1/x = 6859937278830797291664592131120606308688036382723378951768035303146619657244
    pub static XINV: Scalar = Scalar {
        bytes: [
            0x1c, 0xdc, 0x17, 0xfc, 0xe0, 0xe9, 0xa5, 0xbb, 0xd9, 0x24, 0x7e, 0x56, 0xbb, 0x01,
            0x63, 0x47, 0xbb, 0xba, 0x31, 0xed, 0xd5, 0xa9, 0xbb, 0x96, 0xd5, 0x0b, 0xcd, 0x7a,
            0x3f, 0x96, 0x2a, 0x0f,
        ],
    };
    /// y = 2592331292931086675770238855846338635550719849568364935475441891787804997264
    pub static Y: Scalar = Scalar {
        bytes: [
            0x90, 0x76, 0x33, 0xfe, 0x1c, 0x4b, 0x66, 0xa4, 0xa2, 0x8d, 0x2d, 0xd7, 0x67, 0x83,
            0x86, 0xc3, 0x53, 0xd0, 0xde, 0x54, 0x55, 0xd4, 0xfc, 0x9d, 0xe8, 0xef, 0x7a, 0xc3,
            0x1f, 0x35, 0xbb, 0x05,
        ],
    };

    /// The largest scalar that satisfies invariant #1, i.e., the largest scalar with the top bit
    /// set to 0. Since this scalar violates invariant #2, i.e., it's greater than the modulus `l`,
    /// addition and subtraction are broken. The only thing you can do with this is scalar-point
    /// multiplication (and actually also scalar-scalar multiplication, but that's just a quirk of
    /// our implementation).
    pub(crate) static LARGEST_UNREDUCED_SCALAR: Scalar = Scalar {
        bytes: [
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0x7f,
        ],
    };

    /// x*y = 5690045403673944803228348699031245560686958845067437804563560795922180092780
    static X_TIMES_Y: Scalar = Scalar {
        bytes: [
            0x6c, 0x33, 0x74, 0xa1, 0x89, 0x4f, 0x62, 0x21, 0x0a, 0xaa, 0x2f, 0xe1, 0x86, 0xa6,
            0xf9, 0x2c, 0xe0, 0xaa, 0x75, 0xc2, 0x77, 0x95, 0x81, 0xc2, 0x95, 0xfc, 0x08, 0x17,
            0x9a, 0x73, 0x94, 0x0c,
        ],
    };

    /// sage: l = 2^252 + 27742317777372353535851937790883648493
    /// sage: big = 2^256 - 1
    /// sage: repr((big % l).digits(256))
    static CANONICAL_2_256_MINUS_1: Scalar = Scalar {
        bytes: [
            28, 149, 152, 141, 116, 49, 236, 214, 112, 207, 125, 115, 244, 91, 239, 198, 254, 255,
            255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 15,
        ],
    };

    /// An arbitrary scalar used as the input for the NAF test vector below.
    static A_SCALAR: Scalar = Scalar {
        bytes: [
            0x1a, 0x0e, 0x97, 0x8a, 0x90, 0xf6, 0x62, 0x2d, 0x37, 0x47, 0x02, 0x3f, 0x8a, 0xd8,
            0x26, 0x4d, 0xa7, 0x58, 0xaa, 0x1b, 0x88, 0xe0, 0x40, 0xd1, 0x58, 0x9e, 0x7b, 0x7f,
            0x23, 0x76, 0xef, 0x09,
        ],
    };

    /// Precomputed width-5 NAF digits of `A_SCALAR`, checked against
    /// `non_adjacent_form(5)` in `non_adjacent_form_test_vector`.
    static A_NAF: [i8; 256] = [
        0, 13, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, -9, 0, 0, 0, 0, -11, 0, 0, 0, 0, 3, 0, 0,
        0, 0, 1, 0, 0, 0, 0, 9, 0, 0, 0, 0, -5, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 11, 0, 0, 0, 0,
        11, 0, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, -3, 0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
        0, -1, 0, 0, 0, 0, 0, 9, 0, 0, 0, 0, -15, 0, 0, 0, 0, -7, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, 5,
        0, 0, 0, 0, 13, 0, 0, 0, 0, 0, -3, 0, 0, 0, 0, -11, 0, 0, 0, 0, -7, 0, 0, 0, 0, -13, 0, 0,
        0, 0, 11, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, -15, 0, 0, 0, 0, 1, 0, 0, 0, 0,
        7, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 15,
        0, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, -15, 0,
        0, 0, 0, 0, 15, 0, 0, 0, 0, 15, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
    ];

    /// l - 1, i.e. the additive inverse of 1 mod l.
    const BASEPOINT_ORDER_MINUS_ONE: Scalar = Scalar {
        bytes: [
            0xec, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58, 0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9,
            0xde, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x10,
        ],
    };

    /// The largest clamped integer
    static LARGEST_CLAMPED_INTEGER: [u8; 32] = clamp_integer(LARGEST_UNREDUCED_SCALAR.bytes);
1516
    #[test]
    fn fuzzer_testcase_reduction() {
        // Regression test: narrow (32-byte) and wide (64-byte) reductions of
        // the same values must agree, and a * b must match the known product.
        // LE bytes of 24519928653854221733733552434404946937899825954937634815
        let a_bytes = [
            255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
            255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        ];
        // LE bytes of 4975441334397345751130612518500927154628011511324180036903450236863266160640
        let b_bytes = [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 210, 210,
            210, 255, 255, 255, 255, 10,
        ];
        // LE bytes of 6432735165214683820902750800207468552549813371247423777071615116673864412038
        let c_bytes = [
            134, 171, 119, 216, 180, 128, 178, 62, 171, 132, 32, 62, 34, 119, 104, 193, 47, 215,
            181, 250, 14, 207, 172, 93, 75, 207, 211, 103, 144, 204, 56, 14,
        ];

        let a = Scalar::from_bytes_mod_order(a_bytes);
        let b = Scalar::from_bytes_mod_order(b_bytes);
        let c = Scalar::from_bytes_mod_order(c_bytes);

        // The upper 32 bytes of `tmp` stay zero, so the wide reduction sees
        // the same numeric value as the narrow one.
        let mut tmp = [0u8; 64];

        // also_a = (a mod l)
        tmp[0..32].copy_from_slice(&a_bytes[..]);
        let also_a = Scalar::from_bytes_mod_order_wide(&tmp);

        // also_b = (b mod l)
        tmp[0..32].copy_from_slice(&b_bytes[..]);
        let also_b = Scalar::from_bytes_mod_order_wide(&tmp);

        let expected_c = a * b;
        let also_expected_c = also_a * also_b;

        assert_eq!(c, expected_c);
        assert_eq!(c, also_expected_c);
    }
1555
1556    #[test]
1557    fn non_adjacent_form_test_vector() {
1558        let naf = A_SCALAR.non_adjacent_form(5);
1559        for i in 0..256 {
1560            assert_eq!(naf[i], A_NAF[i]);
1561        }
1562    }
1563
1564    fn non_adjacent_form_iter(w: usize, x: &Scalar) {
1565        let naf = x.non_adjacent_form(w);
1566
1567        // Reconstruct the scalar from the computed NAF
1568        let mut y = Scalar::ZERO;
1569        for i in (0..256).rev() {
1570            y += y;
1571            let digit = if naf[i] < 0 {
1572                -Scalar::from((-naf[i]) as u64)
1573            } else {
1574                Scalar::from(naf[i] as u64)
1575            };
1576            y += digit;
1577        }
1578
1579        assert_eq!(*x, y);
1580    }
1581
1582    #[test]
1583    fn non_adjacent_form_random() {
1584        let mut rng = rand::thread_rng();
1585        for _ in 0..1_000 {
1586            let x = Scalar::random(&mut rng);
1587            for w in &[5, 6, 7, 8] {
1588                non_adjacent_form_iter(*w, &x);
1589            }
1590        }
1591    }
1592
1593    #[test]
1594    fn from_u64() {
1595        let val: u64 = 0xdeadbeefdeadbeef;
1596        let s = Scalar::from(val);
1597        assert_eq!(s[7], 0xde);
1598        assert_eq!(s[6], 0xad);
1599        assert_eq!(s[5], 0xbe);
1600        assert_eq!(s[4], 0xef);
1601        assert_eq!(s[3], 0xde);
1602        assert_eq!(s[2], 0xad);
1603        assert_eq!(s[1], 0xbe);
1604        assert_eq!(s[0], 0xef);
1605    }
1606
1607    #[test]
1608    fn scalar_mul_by_one() {
1609        let test_scalar = X * Scalar::ONE;
1610        for i in 0..32 {
1611            assert!(test_scalar[i] == X[i]);
1612        }
1613    }
1614
1615    #[test]
1616    fn add_reduces() {
1617        // Check that addition wraps around the modulus
1618        assert_eq!(BASEPOINT_ORDER_MINUS_ONE + Scalar::ONE, Scalar::ZERO);
1619    }
1620
1621    #[test]
1622    fn sub_reduces() {
1623        // Check that subtraction wraps around the modulus
1624        assert_eq!(Scalar::ZERO - Scalar::ONE, BASEPOINT_ORDER_MINUS_ONE);
1625    }
1626
1627    #[test]
1628    fn impl_add() {
1629        let two = Scalar::from(2u64);
1630        let one = Scalar::ONE;
1631        let should_be_two = one + one;
1632        assert_eq!(should_be_two, two);
1633    }
1634
1635    #[allow(non_snake_case)]
1636    #[test]
1637    fn impl_mul() {
1638        let should_be_X_times_Y = X * Y;
1639        assert_eq!(should_be_X_times_Y, X_TIMES_Y);
1640    }
1641
1642    #[allow(non_snake_case)]
1643    #[test]
1644    #[cfg(feature = "alloc")]
1645    fn impl_product() {
1646        // Test that product works for non-empty iterators
1647        let X_Y_vector = [X, Y];
1648        let should_be_X_times_Y: Scalar = X_Y_vector.iter().product();
1649        assert_eq!(should_be_X_times_Y, X_TIMES_Y);
1650
1651        // Test that product works for the empty iterator
1652        let one = Scalar::ONE;
1653        let empty_vector = [];
1654        let should_be_one: Scalar = empty_vector.iter().product();
1655        assert_eq!(should_be_one, one);
1656
1657        // Test that product works for iterators where Item = Scalar
1658        let xs = [Scalar::from(2u64); 10];
1659        let ys = [Scalar::from(3u64); 10];
1660        // now zs is an iterator with Item = Scalar
1661        let zs = xs.iter().zip(ys.iter()).map(|(x, y)| x * y);
1662
1663        let x_prod: Scalar = xs.iter().product();
1664        let y_prod: Scalar = ys.iter().product();
1665        let z_prod: Scalar = zs.product();
1666
1667        assert_eq!(x_prod, Scalar::from(1024u64));
1668        assert_eq!(y_prod, Scalar::from(59049u64));
1669        assert_eq!(z_prod, Scalar::from(60466176u64));
1670        assert_eq!(x_prod * y_prod, z_prod);
1671    }
1672
1673    #[test]
1674    #[cfg(feature = "alloc")]
1675    fn impl_sum() {
1676        // Test that sum works for non-empty iterators
1677        let two = Scalar::from(2u64);
1678        let one_vector = [Scalar::ONE, Scalar::ONE];
1679        let should_be_two: Scalar = one_vector.iter().sum();
1680        assert_eq!(should_be_two, two);
1681
1682        // Test that sum works for the empty iterator
1683        let zero = Scalar::ZERO;
1684        let empty_vector = [];
1685        let should_be_zero: Scalar = empty_vector.iter().sum();
1686        assert_eq!(should_be_zero, zero);
1687
1688        // Test that sum works for owned types
1689        let xs = [Scalar::from(1u64); 10];
1690        let ys = [Scalar::from(2u64); 10];
1691        // now zs is an iterator with Item = Scalar
1692        let zs = xs.iter().zip(ys.iter()).map(|(x, y)| x + y);
1693
1694        let x_sum: Scalar = xs.iter().sum();
1695        let y_sum: Scalar = ys.iter().sum();
1696        let z_sum: Scalar = zs.sum();
1697
1698        assert_eq!(x_sum, Scalar::from(10u64));
1699        assert_eq!(y_sum, Scalar::from(20u64));
1700        assert_eq!(z_sum, Scalar::from(30u64));
1701        assert_eq!(x_sum + y_sum, z_sum);
1702    }
1703
1704    #[test]
1705    fn square() {
1706        let expected = X * X;
1707        let actual = X.unpack().square().pack();
1708        for i in 0..32 {
1709            assert!(expected[i] == actual[i]);
1710        }
1711    }
1712
1713    #[test]
1714    fn reduce() {
1715        let biggest = Scalar::from_bytes_mod_order([0xff; 32]);
1716        assert_eq!(biggest, CANONICAL_2_256_MINUS_1);
1717    }
1718
1719    #[test]
1720    fn from_bytes_mod_order_wide() {
1721        let mut bignum = [0u8; 64];
1722        // set bignum = x + 2^256x
1723        for i in 0..32 {
1724            bignum[i] = X[i];
1725            bignum[32 + i] = X[i];
1726        }
1727        // 3958878930004874126169954872055634648693766179881526445624823978500314864344
1728        // = x + 2^256x (mod l)
1729        let reduced = Scalar {
1730            bytes: [
1731                216, 154, 179, 139, 210, 121, 2, 71, 69, 99, 158, 216, 23, 173, 63, 100, 204, 0,
1732                91, 50, 219, 153, 57, 249, 28, 82, 31, 197, 100, 165, 192, 8,
1733            ],
1734        };
1735        let test_red = Scalar::from_bytes_mod_order_wide(&bignum);
1736        for i in 0..32 {
1737            assert!(test_red[i] == reduced[i]);
1738        }
1739    }
1740
1741    #[allow(non_snake_case)]
1742    #[test]
1743    fn invert() {
1744        let inv_X = X.invert();
1745        assert_eq!(inv_X, XINV);
1746        let should_be_one = inv_X * X;
1747        assert_eq!(should_be_one, Scalar::ONE);
1748    }
1749
1750    // Negating a scalar twice should result in the original scalar.
1751    #[allow(non_snake_case)]
1752    #[test]
1753    fn neg_twice_is_identity() {
1754        let negative_X = -&X;
1755        let should_be_X = -&negative_X;
1756
1757        assert_eq!(should_be_X, X);
1758    }
1759
1760    #[test]
1761    fn to_bytes_from_bytes_roundtrips() {
1762        let unpacked = X.unpack();
1763        let bytes = unpacked.to_bytes();
1764        let should_be_unpacked = UnpackedScalar::from_bytes(&bytes);
1765
1766        assert_eq!(should_be_unpacked.0, unpacked.0);
1767    }
1768
1769    #[test]
1770    fn montgomery_reduce_matches_from_bytes_mod_order_wide() {
1771        let mut bignum = [0u8; 64];
1772
1773        // set bignum = x + 2^256x
1774        for i in 0..32 {
1775            bignum[i] = X[i];
1776            bignum[32 + i] = X[i];
1777        }
1778        // x + 2^256x (mod l)
1779        //         = 3958878930004874126169954872055634648693766179881526445624823978500314864344
1780        let expected = Scalar {
1781            bytes: [
1782                216, 154, 179, 139, 210, 121, 2, 71, 69, 99, 158, 216, 23, 173, 63, 100, 204, 0,
1783                91, 50, 219, 153, 57, 249, 28, 82, 31, 197, 100, 165, 192, 8,
1784            ],
1785        };
1786        let reduced = Scalar::from_bytes_mod_order_wide(&bignum);
1787
1788        // The reduced scalar should match the expected
1789        assert_eq!(reduced.bytes, expected.bytes);
1790
1791        //  (x + 2^256x) * R
1792        let interim =
1793            UnpackedScalar::mul_internal(&UnpackedScalar::from_bytes_wide(&bignum), &constants::R);
1794        // ((x + 2^256x) * R) / R  (mod l)
1795        let montgomery_reduced = UnpackedScalar::montgomery_reduce(&interim);
1796
1797        // The Montgomery reduced scalar should match the reduced one, as well as the expected
1798        assert_eq!(montgomery_reduced.0, reduced.unpack().0);
1799        assert_eq!(montgomery_reduced.0, expected.unpack().0)
1800    }
1801
1802    #[test]
1803    fn canonical_decoding() {
1804        // canonical encoding of 1667457891
1805        let canonical_bytes = [
1806            99, 99, 99, 99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1807            0, 0, 0, 0,
1808        ];
1809
1810        // encoding of
1811        //   7265385991361016183439748078976496179028704920197054998554201349516117938192
1812        // = 28380414028753969466561515933501938171588560817147392552250411230663687203 (mod l)
1813        // non_canonical because unreduced mod l
1814        let non_canonical_bytes_because_unreduced = [16; 32];
1815
1816        // encoding with high bit set, to check that the parser isn't pre-masking the high bit
1817        let non_canonical_bytes_because_highbit = [
1818            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1819            0, 0, 128,
1820        ];
1821
1822        assert!(bool::from(
1823            Scalar::from_canonical_bytes(canonical_bytes).is_some()
1824        ));
1825        assert!(bool::from(
1826            Scalar::from_canonical_bytes(non_canonical_bytes_because_unreduced).is_none()
1827        ));
1828        assert!(bool::from(
1829            Scalar::from_canonical_bytes(non_canonical_bytes_because_highbit).is_none()
1830        ));
1831    }
1832
1833    #[test]
1834    #[cfg(feature = "serde")]
1835    fn serde_bincode_scalar_roundtrip() {
1836        use bincode;
1837        let encoded = bincode::serialize(&X).unwrap();
1838        let parsed: Scalar = bincode::deserialize(&encoded).unwrap();
1839        assert_eq!(parsed, X);
1840
1841        // Check that the encoding is 32 bytes exactly
1842        assert_eq!(encoded.len(), 32);
1843
1844        // Check that the encoding itself matches the usual one
1845        assert_eq!(X, bincode::deserialize(X.as_bytes()).unwrap(),);
1846    }
1847
1848    #[cfg(all(debug_assertions, feature = "alloc"))]
1849    #[test]
1850    #[should_panic]
1851    fn batch_invert_with_a_zero_input_panics() {
1852        let mut xs = vec![Scalar::ONE; 16];
1853        xs[3] = Scalar::ZERO;
1854        // This should panic in debug mode.
1855        Scalar::batch_invert(&mut xs);
1856    }
1857
1858    #[test]
1859    #[cfg(feature = "alloc")]
1860    fn batch_invert_empty() {
1861        assert_eq!(Scalar::ONE, Scalar::batch_invert(&mut []));
1862    }
1863
1864    #[test]
1865    #[cfg(feature = "alloc")]
1866    fn batch_invert_consistency() {
1867        let mut x = Scalar::from(1u64);
1868        let mut v1: Vec<_> = (0..16)
1869            .map(|_| {
1870                let tmp = x;
1871                x = x + x;
1872                tmp
1873            })
1874            .collect();
1875        let v2 = v1.clone();
1876
1877        let expected: Scalar = v1.iter().product();
1878        let expected = expected.invert();
1879        let ret = Scalar::batch_invert(&mut v1);
1880        assert_eq!(ret, expected);
1881
1882        for (a, b) in v1.iter().zip(v2.iter()) {
1883            assert_eq!(a * b, Scalar::ONE);
1884        }
1885    }
1886
1887    #[cfg(feature = "precomputed-tables")]
1888    fn test_pippenger_radix_iter(scalar: Scalar, w: usize) {
1889        let digits_count = Scalar::to_radix_2w_size_hint(w);
1890        let digits = scalar.as_radix_2w(w);
1891
1892        let radix = Scalar::from((1 << w) as u64);
1893        let mut term = Scalar::ONE;
1894        let mut recovered_scalar = Scalar::ZERO;
1895        for digit in &digits[0..digits_count] {
1896            let digit = *digit;
1897            if digit != 0 {
1898                let sdigit = if digit < 0 {
1899                    -Scalar::from((-(digit as i64)) as u64)
1900                } else {
1901                    Scalar::from(digit as u64)
1902                };
1903                recovered_scalar += term * sdigit;
1904            }
1905            term *= radix;
1906        }
1907        // When the input is unreduced, we may only recover the scalar mod l.
1908        assert_eq!(recovered_scalar, scalar.reduce());
1909    }
1910
1911    #[test]
1912    #[cfg(feature = "precomputed-tables")]
1913    fn test_pippenger_radix() {
1914        use core::iter;
1915        // For each valid radix it tests that 1000 random-ish scalars can be restored
1916        // from the produced representation precisely.
1917        let cases = (2..100)
1918            .map(|s| Scalar::from(s as u64).invert())
1919            // The largest unreduced scalar, s = 2^255-1. This is not reduced mod l. Scalar mult
1920            // still works though.
1921            .chain(iter::once(LARGEST_UNREDUCED_SCALAR));
1922
1923        for scalar in cases {
1924            test_pippenger_radix_iter(scalar, 6);
1925            test_pippenger_radix_iter(scalar, 7);
1926            test_pippenger_radix_iter(scalar, 8);
1927        }
1928    }
1929
    #[test]
    #[cfg(feature = "alloc")]
    fn test_read_le_u64_into() {
        // Test vectors: (little-endian source bytes, expected u64 words).
        // Each expected word is the little-endian interpretation of the
        // corresponding 8-byte chunk of the source.
        let cases: &[(&[u8], &[u64])] = &[
            (
                &[0xFE, 0xEF, 0x10, 0x01, 0x1F, 0xF1, 0x0F, 0xF0],
                &[0xF00F_F11F_0110_EFFE],
            ),
            (
                &[
                    0xFE, 0xEF, 0x10, 0x01, 0x1F, 0xF1, 0x0F, 0xF0, 0x12, 0x34, 0x56, 0x78, 0x9A,
                    0xBC, 0xDE, 0xF0,
                ],
                &[0xF00F_F11F_0110_EFFE, 0xF0DE_BC9A_7856_3412],
            ),
        ];

        for (src, expected) in cases {
            // Destination is zeroed and sized to the expected word count
            let mut dst = vec![0; expected.len()];
            read_le_u64_into(src, &mut dst);

            assert_eq!(&dst, expected, "Expected {:x?} got {:x?}", expected, dst);
        }
    }
1954
1955    // Tests consistency of From<{integer}> impls for Scalar
1956    #[test]
1957    fn test_scalar_from_int() {
1958        let s1 = Scalar::ONE;
1959
1960        // For `x` in `u8`, `u16`, `u32`, `u64`, and `u128`, check that
1961        // `Scalar::from(x + 1) == Scalar::from(x) + Scalar::from(1)`
1962
1963        let x = 0x23u8;
1964        let sx = Scalar::from(x);
1965        assert_eq!(sx + s1, Scalar::from(x + 1));
1966
1967        let x = 0x2323u16;
1968        let sx = Scalar::from(x);
1969        assert_eq!(sx + s1, Scalar::from(x + 1));
1970
1971        let x = 0x2323_2323u32;
1972        let sx = Scalar::from(x);
1973        assert_eq!(sx + s1, Scalar::from(x + 1));
1974
1975        let x = 0x2323_2323_2323_2323u64;
1976        let sx = Scalar::from(x);
1977        assert_eq!(sx + s1, Scalar::from(x + 1));
1978
1979        let x = 0x2323_2323_2323_2323_2323_2323_2323_2323u128;
1980        let sx = Scalar::from(x);
1981        assert_eq!(sx + s1, Scalar::from(x + 1));
1982    }
1983
1984    #[cfg(feature = "group")]
1985    #[test]
1986    fn ff_constants() {
1987        assert_eq!(Scalar::from(2u64) * Scalar::TWO_INV, Scalar::ONE);
1988
1989        assert_eq!(
1990            Scalar::ROOT_OF_UNITY * Scalar::ROOT_OF_UNITY_INV,
1991            Scalar::ONE,
1992        );
1993
1994        // ROOT_OF_UNITY^{2^s} mod m == 1
1995        assert_eq!(
1996            Scalar::ROOT_OF_UNITY.pow(&[1u64 << Scalar::S, 0, 0, 0]),
1997            Scalar::ONE,
1998        );
1999
2000        // DELTA^{t} mod m == 1
2001        assert_eq!(
2002            Scalar::DELTA.pow(&[
2003                0x9604_98c6_973d_74fb,
2004                0x0537_be77_a8bd_e735,
2005                0x0000_0000_0000_0000,
2006                0x0400_0000_0000_0000,
2007            ]),
2008            Scalar::ONE,
2009        );
2010    }
2011
2012    #[cfg(feature = "group")]
2013    #[test]
2014    fn ff_impls() {
2015        assert!(bool::from(Scalar::ZERO.is_even()));
2016        assert!(bool::from(Scalar::ONE.is_odd()));
2017        assert!(bool::from(Scalar::from(2u64).is_even()));
2018        assert!(bool::from(Scalar::DELTA.is_even()));
2019
2020        assert!(bool::from(Field::invert(&Scalar::ZERO).is_none()));
2021        assert_eq!(Field::invert(&X).unwrap(), XINV);
2022
2023        let x_sq = X.square();
2024        // We should get back either the positive or negative root.
2025        assert!([X, -X].contains(&x_sq.sqrt().unwrap()));
2026
2027        assert_eq!(Scalar::from_repr_vartime(X.to_repr()), Some(X));
2028        assert_eq!(Scalar::from_repr_vartime([0xff; 32]), None);
2029
2030        assert_eq!(Scalar::from_repr(X.to_repr()).unwrap(), X);
2031        assert!(bool::from(Scalar::from_repr([0xff; 32]).is_none()));
2032    }
2033
2034    #[test]
2035    #[should_panic]
2036    fn test_read_le_u64_into_should_panic_on_bad_input() {
2037        let mut dst = [0_u64; 1];
2038        // One byte short
2039        read_le_u64_into(&[0xFE, 0xEF, 0x10, 0x01, 0x1F, 0xF1, 0x0F], &mut dst);
2040    }
2041
    #[test]
    fn test_scalar_clamp() {
        // Clamp an arbitrary scalar. Per the expected vector: the low 3 bits
        // are cleared, the top bit (255) is cleared, and bit 254 is set.
        let input = A_SCALAR.bytes;
        let expected = [
            0x18, 0x0e, 0x97, 0x8a, 0x90, 0xf6, 0x62, 0x2d, 0x37, 0x47, 0x02, 0x3f, 0x8a, 0xd8,
            0x26, 0x4d, 0xa7, 0x58, 0xaa, 0x1b, 0x88, 0xe0, 0x40, 0xd1, 0x58, 0x9e, 0x7b, 0x7f,
            0x23, 0x76, 0xef, 0x49,
        ];
        let actual = clamp_integer(input);
        assert_eq!(actual, expected);

        // Clamping the all-zero bytestring only sets bit 254 (top byte 0x40)
        let expected = [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0x40,
        ];
        let actual = clamp_integer([0; 32]);
        assert_eq!(expected, actual);
        // Clamping the all-ones bytestring clears the low 3 bits (first byte
        // 0xf8) and the top bit (last byte 0x7f); bit 254 is already set
        let expected = [
            0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0x7f,
        ];
        let actual = clamp_integer([0xff; 32]);
        assert_eq!(actual, expected);

        // An already-clamped value is a fixed point of clamp_integer
        assert_eq!(
            LARGEST_CLAMPED_INTEGER,
            clamp_integer(LARGEST_CLAMPED_INTEGER)
        );
    }
2072
2073    // Check that a * b == a.reduce() * a.reduce() for ANY scalars a,b, even ones that violate
2074    // invariant #1, i.e., a,b > 2^255. Old versions of ed25519-dalek did multiplication where a
2075    // was reduced and b was clamped and unreduced. This checks that was always well-defined.
2076    #[test]
2077    fn test_mul_reduction_invariance() {
2078        let mut rng = rand::thread_rng();
2079
2080        for _ in 0..10 {
2081            // Also define c that's clamped. We'll make sure that clamping doesn't affect
2082            // computation
2083            let (a, b, c) = {
2084                let mut a_bytes = [0u8; 32];
2085                let mut b_bytes = [0u8; 32];
2086                let mut c_bytes = [0u8; 32];
2087                rng.fill_bytes(&mut a_bytes);
2088                rng.fill_bytes(&mut b_bytes);
2089                rng.fill_bytes(&mut c_bytes);
2090                (
2091                    Scalar { bytes: a_bytes },
2092                    Scalar { bytes: b_bytes },
2093                    Scalar {
2094                        bytes: clamp_integer(c_bytes),
2095                    },
2096                )
2097            };
2098
2099            // Make sure this is the same product no matter how you cut it
2100            let reduced_mul_ab = a.reduce() * b.reduce();
2101            let reduced_mul_ac = a.reduce() * c.reduce();
2102            assert_eq!(a * b, reduced_mul_ab);
2103            assert_eq!(a.reduce() * b, reduced_mul_ab);
2104            assert_eq!(a * b.reduce(), reduced_mul_ab);
2105            assert_eq!(a * c, reduced_mul_ac);
2106            assert_eq!(a.reduce() * c, reduced_mul_ac);
2107            assert_eq!(a * c.reduce(), reduced_mul_ac);
2108        }
2109    }
2110}