#![cfg_attr(feature = "digest", doc = "```")]
#![cfg_attr(not(feature = "digest"), doc = "```ignore")]
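//! # use curve25519_dalek::scalar::Scalar;
//! use sha2::Sha512;
//!
//! // A minimal sketch of hashing a message to a scalar; it assumes the
//! // `digest` feature and that the `sha2` crate is available to doctests.
//! let s = Scalar::hash_from_bytes::<Sha512>(b"example message");
//! ```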
use core::borrow::Borrow;
use core::fmt::Debug;
use core::iter::{Product, Sum};
use core::ops::Index;
use core::ops::Neg;
use core::ops::{Add, AddAssign};
use core::ops::{Mul, MulAssign};
use core::ops::{Sub, SubAssign};

use cfg_if::cfg_if;

#[cfg(feature = "group")]
use group::ff::{Field, FromUniformBytes, PrimeField};
#[cfg(feature = "group-bits")]
use group::ff::{FieldBits, PrimeFieldBits};

#[cfg(any(test, feature = "group"))]
use rand_core::RngCore;

#[cfg(any(test, feature = "rand_core"))]
use rand_core::CryptoRngCore;

#[cfg(feature = "digest")]
use digest::generic_array::typenum::U64;
#[cfg(feature = "digest")]
use digest::Digest;

use subtle::Choice;
use subtle::ConditionallySelectable;
use subtle::ConstantTimeEq;
use subtle::CtOption;

#[cfg(feature = "zeroize")]
use zeroize::Zeroize;

use crate::backend;
use crate::constants;

cfg_if! {
    if #[cfg(curve25519_dalek_backend = "fiat")] {
        #[cfg(curve25519_dalek_bits = "32")]
        #[cfg_attr(
            docsrs,
            doc(cfg(all(feature = "fiat_backend", curve25519_dalek_bits = "32")))
        )]
        type UnpackedScalar = backend::serial::fiat_u32::scalar::Scalar29;

        #[cfg(curve25519_dalek_bits = "64")]
        #[cfg_attr(
            docsrs,
            doc(cfg(all(feature = "fiat_backend", curve25519_dalek_bits = "64")))
        )]
        type UnpackedScalar = backend::serial::fiat_u64::scalar::Scalar52;
    } else if #[cfg(curve25519_dalek_bits = "64")] {
        #[cfg_attr(docsrs, doc(cfg(curve25519_dalek_bits = "64")))]
        type UnpackedScalar = backend::serial::u64::scalar::Scalar52;
    } else {
        #[cfg_attr(docsrs, doc(cfg(curve25519_dalek_bits = "32")))]
        type UnpackedScalar = backend::serial::u32::scalar::Scalar29;
    }
}

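/// The `Scalar` struct holds an element of ℤ/ℓℤ, where ℓ is the order of the
/// prime-order subgroup generated by the Ed25519 basepoint.
///
/// It is stored as 32 little-endian bytes; the public constructors in this
/// module (other than the deprecated `from_bits`) keep that integer reduced
/// modulo ℓ.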
#[allow(clippy::derived_hash_with_manual_eq)]
#[derive(Copy, Clone, Hash)]
pub struct Scalar {
    pub(crate) bytes: [u8; 32],
}

impl Scalar {
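    /// Construct a `Scalar` by reducing a 256-bit little-endian integer modulo
    /// the group order ℓ.
    ///
    /// A small usage sketch (the doctest assumes this module lives at
    /// `curve25519_dalek::scalar`):
    ///
    /// ```
    /// use curve25519_dalek::scalar::Scalar;
    ///
    /// let s = Scalar::from_bytes_mod_order([0xff; 32]);
    /// // Reduction is idempotent: reducing the canonical bytes again is a no-op.
    /// assert_eq!(s, Scalar::from_bytes_mod_order(s.to_bytes()));
    /// ```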
    pub fn from_bytes_mod_order(bytes: [u8; 32]) -> Scalar {
        let s_unreduced = Scalar { bytes };

        let s = s_unreduced.reduce();
        debug_assert_eq!(0u8, s[31] >> 7);

        s
    }

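    /// Construct a `Scalar` by reducing a 512-bit little-endian integer modulo
    /// the group order ℓ; this is the usual way to map a uniform 64-byte string
    /// (for example a wide hash output) to a uniformly distributed scalar.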
    pub fn from_bytes_mod_order_wide(input: &[u8; 64]) -> Scalar {
        UnpackedScalar::from_bytes_wide(input).pack()
    }

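    /// Attempt to construct a `Scalar` from a canonical byte representation.
    ///
    /// The result is `Some(s)` exactly when `bytes` is the canonical
    /// little-endian encoding of an integer `s` with `0 <= s < ℓ`; the check is
    /// performed in constant time and returned as a `CtOption`.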
    pub fn from_canonical_bytes(bytes: [u8; 32]) -> CtOption<Scalar> {
        let high_bit_unset = (bytes[31] >> 7).ct_eq(&0);
        let candidate = Scalar { bytes };
        CtOption::new(candidate, high_bit_unset & candidate.is_canonical())
    }

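    /// Construct a `Scalar` from the low 255 bits of a 256-bit integer without
    /// reducing it modulo ℓ. Scalar-scalar arithmetic on unreduced values is
    /// not well-defined, which is why this constructor is deprecated and gated
    /// behind `legacy_compatibility`.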
    #[cfg(feature = "legacy_compatibility")]
    #[deprecated(
        since = "4.0.0",
        note = "This constructor outputs scalars with undefined scalar-scalar arithmetic. See docs."
    )]
    pub const fn from_bits(bytes: [u8; 32]) -> Scalar {
        let mut s = Scalar { bytes };
        s.bytes[31] &= 0b0111_1111;

        s
    }
}

impl Debug for Scalar {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        write!(f, "Scalar{{\n\tbytes: {:?},\n}}", &self.bytes)
    }
}

impl Eq for Scalar {}
impl PartialEq for Scalar {
    fn eq(&self, other: &Self) -> bool {
        self.ct_eq(other).into()
    }
}

impl ConstantTimeEq for Scalar {
    fn ct_eq(&self, other: &Self) -> Choice {
        self.bytes.ct_eq(&other.bytes)
    }
}

impl Index<usize> for Scalar {
    type Output = u8;

    fn index(&self, _index: usize) -> &u8 {
        &(self.bytes[_index])
    }
}

impl<'a> MulAssign<&'a Scalar> for Scalar {
    fn mul_assign(&mut self, _rhs: &'a Scalar) {
        *self = UnpackedScalar::mul(&self.unpack(), &_rhs.unpack()).pack();
    }
}

define_mul_assign_variants!(LHS = Scalar, RHS = Scalar);

impl<'a> Mul<&'a Scalar> for &Scalar {
    type Output = Scalar;
    fn mul(self, _rhs: &'a Scalar) -> Scalar {
        UnpackedScalar::mul(&self.unpack(), &_rhs.unpack()).pack()
    }
}

define_mul_variants!(LHS = Scalar, RHS = Scalar, Output = Scalar);

impl<'a> AddAssign<&'a Scalar> for Scalar {
    fn add_assign(&mut self, _rhs: &'a Scalar) {
        *self = *self + _rhs;
    }
}

define_add_assign_variants!(LHS = Scalar, RHS = Scalar);

impl<'a> Add<&'a Scalar> for &Scalar {
    type Output = Scalar;
    #[allow(non_snake_case)]
    fn add(self, _rhs: &'a Scalar) -> Scalar {
        UnpackedScalar::add(&self.unpack(), &_rhs.unpack()).pack()
    }
}

define_add_variants!(LHS = Scalar, RHS = Scalar, Output = Scalar);

impl<'a> SubAssign<&'a Scalar> for Scalar {
    fn sub_assign(&mut self, _rhs: &'a Scalar) {
        *self = *self - _rhs;
    }
}

define_sub_assign_variants!(LHS = Scalar, RHS = Scalar);

impl<'a> Sub<&'a Scalar> for &Scalar {
    type Output = Scalar;
    #[allow(non_snake_case)]
    fn sub(self, rhs: &'a Scalar) -> Scalar {
        UnpackedScalar::sub(&self.unpack(), &rhs.unpack()).pack()
    }
}

define_sub_variants!(LHS = Scalar, RHS = Scalar, Output = Scalar);

impl Neg for &Scalar {
    type Output = Scalar;
    #[allow(non_snake_case)]
    fn neg(self) -> Scalar {
        let self_R = UnpackedScalar::mul_internal(&self.unpack(), &constants::R);
        let self_mod_l = UnpackedScalar::montgomery_reduce(&self_R);
        UnpackedScalar::sub(&UnpackedScalar::ZERO, &self_mod_l).pack()
    }
}

impl Neg for Scalar {
    type Output = Scalar;
    fn neg(self) -> Scalar {
        -&self
    }
}

impl ConditionallySelectable for Scalar {
    fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
        let mut bytes = [0u8; 32];
        #[allow(clippy::needless_range_loop)]
        for i in 0..32 {
            bytes[i] = u8::conditional_select(&a.bytes[i], &b.bytes[i], choice);
        }
        Scalar { bytes }
    }
}

#[cfg(feature = "serde")]
use serde::de::Visitor;
#[cfg(feature = "serde")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};

#[cfg(feature = "serde")]
#[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
impl Serialize for Scalar {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        use serde::ser::SerializeTuple;
        let mut tup = serializer.serialize_tuple(32)?;
        for byte in self.as_bytes().iter() {
            tup.serialize_element(byte)?;
        }
        tup.end()
    }
}

#[cfg(feature = "serde")]
#[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
impl<'de> Deserialize<'de> for Scalar {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        struct ScalarVisitor;

        impl<'de> Visitor<'de> for ScalarVisitor {
            type Value = Scalar;

            fn expecting(&self, formatter: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
                formatter.write_str(
                    "a sequence of 32 bytes whose little-endian interpretation is less than the \
                    basepoint order ℓ",
                )
            }

            fn visit_seq<A>(self, mut seq: A) -> Result<Scalar, A::Error>
            where
                A: serde::de::SeqAccess<'de>,
            {
                let mut bytes = [0u8; 32];
                #[allow(clippy::needless_range_loop)]
                for i in 0..32 {
                    bytes[i] = seq
                        .next_element()?
                        .ok_or_else(|| serde::de::Error::invalid_length(i, &"expected 32 bytes"))?;
                }
                Option::from(Scalar::from_canonical_bytes(bytes))
                    .ok_or_else(|| serde::de::Error::custom("scalar was not canonically encoded"))
            }
        }

        deserializer.deserialize_tuple(32, ScalarVisitor)
    }
}

impl<T> Product<T> for Scalar
where
    T: Borrow<Scalar>,
{
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = T>,
    {
        iter.fold(Scalar::ONE, |acc, item| acc * item.borrow())
    }
}

impl<T> Sum<T> for Scalar
where
    T: Borrow<Scalar>,
{
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = T>,
    {
        iter.fold(Scalar::ZERO, |acc, item| acc + item.borrow())
    }
}

impl Default for Scalar {
    fn default() -> Scalar {
        Scalar::ZERO
    }
}

impl From<u8> for Scalar {
    fn from(x: u8) -> Scalar {
        let mut s_bytes = [0u8; 32];
        s_bytes[0] = x;
        Scalar { bytes: s_bytes }
    }
}

impl From<u16> for Scalar {
    fn from(x: u16) -> Scalar {
        let mut s_bytes = [0u8; 32];
        let x_bytes = x.to_le_bytes();
        s_bytes[0..x_bytes.len()].copy_from_slice(&x_bytes);
        Scalar { bytes: s_bytes }
    }
}

impl From<u32> for Scalar {
    fn from(x: u32) -> Scalar {
        let mut s_bytes = [0u8; 32];
        let x_bytes = x.to_le_bytes();
        s_bytes[0..x_bytes.len()].copy_from_slice(&x_bytes);
        Scalar { bytes: s_bytes }
    }
}

impl From<u64> for Scalar {
    fn from(x: u64) -> Scalar {
        let mut s_bytes = [0u8; 32];
        let x_bytes = x.to_le_bytes();
        s_bytes[0..x_bytes.len()].copy_from_slice(&x_bytes);
        Scalar { bytes: s_bytes }
    }
}

impl From<u128> for Scalar {
    fn from(x: u128) -> Scalar {
        let mut s_bytes = [0u8; 32];
        let x_bytes = x.to_le_bytes();
        s_bytes[0..x_bytes.len()].copy_from_slice(&x_bytes);
        Scalar { bytes: s_bytes }
    }
}

#[cfg(feature = "zeroize")]
impl Zeroize for Scalar {
    fn zeroize(&mut self) {
        self.bytes.zeroize();
    }
}

impl Scalar {
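    /// The scalar `0`.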
    pub const ZERO: Self = Self { bytes: [0u8; 32] };

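    /// The scalar `1`.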
    pub const ONE: Self = Self {
        bytes: [
            1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ],
    };

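    /// Sample a uniformly random `Scalar`, by reducing 64 bytes of output from
    /// the given cryptographically secure RNG modulo ℓ.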
    #[cfg(any(test, feature = "rand_core"))]
    pub fn random<R: CryptoRngCore + ?Sized>(rng: &mut R) -> Self {
        let mut scalar_bytes = [0u8; 64];
        rng.fill_bytes(&mut scalar_bytes);
        Scalar::from_bytes_mod_order_wide(&scalar_bytes)
    }

    #[cfg(feature = "digest")]
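    /// Hash a slice of bytes into a scalar, using any `Digest` that produces
    /// 64 bytes of output; this is a convenience wrapper around `from_hash`.
    ///
    /// # Example
    ///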
    #[cfg_attr(feature = "digest", doc = "```")]
    #[cfg_attr(not(feature = "digest"), doc = "```ignore")]
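    /// # use curve25519_dalek::scalar::Scalar;
    /// use sha2::Sha512;
    ///
    /// // A minimal sketch; it assumes the `sha2` crate is available to doctests.
    /// let s = Scalar::hash_from_bytes::<Sha512>(b"hash me into a scalar");
    /// ```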
    pub fn hash_from_bytes<D>(input: &[u8]) -> Scalar
    where
        D: Digest<OutputSize = U64> + Default,
    {
        let mut hash = D::default();
        hash.update(input);
        Scalar::from_hash(hash)
    }

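    /// Construct a scalar from an existing `Digest` instance by reducing its
    /// 64-byte output modulo ℓ.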
    #[cfg(feature = "digest")]
    pub fn from_hash<D>(hash: D) -> Scalar
    where
        D: Digest<OutputSize = U64>,
    {
        let mut output = [0u8; 64];
        output.copy_from_slice(hash.finalize().as_slice());
        Scalar::from_bytes_mod_order_wide(&output)
    }

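    /// Convert this `Scalar` to its underlying sequence of 32 little-endian bytes.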
    pub const fn to_bytes(&self) -> [u8; 32] {
        self.bytes
    }

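    /// View this `Scalar` as a reference to its underlying sequence of 32
    /// little-endian bytes.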
    pub const fn as_bytes(&self) -> &[u8; 32] {
        &self.bytes
    }

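    /// Compute the multiplicative inverse of this nonzero scalar modulo ℓ.
    ///
    /// A small sketch (the doctest assumes this module lives at
    /// `curve25519_dalek::scalar`):
    ///
    /// ```
    /// use curve25519_dalek::scalar::Scalar;
    ///
    /// let x = Scalar::from(42u64);
    /// assert_eq!(x * x.invert(), Scalar::ONE);
    /// ```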
    pub fn invert(&self) -> Scalar {
        self.unpack().invert().pack()
    }

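    /// Invert a slice of nonzero `Scalar`s in place using Montgomery's trick:
    /// a single shared inversion plus a pass of multiplications, instead of one
    /// inversion per element. Returns the inverse of the product of all inputs.
    ///
    /// In debug builds, a zero input trips the `debug_assert!` below and panics.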
    #[cfg(feature = "alloc")]
    pub fn batch_invert(inputs: &mut [Scalar]) -> Scalar {
        let n = inputs.len();
        let one: UnpackedScalar = Scalar::ONE.unpack().as_montgomery();

        let mut scratch = vec![one; n];

        let mut acc = Scalar::ONE.unpack().as_montgomery();

        for (input, scratch) in inputs.iter_mut().zip(scratch.iter_mut()) {
            *scratch = acc;

            let tmp = input.unpack().as_montgomery();
            *input = tmp.pack();
            acc = UnpackedScalar::montgomery_mul(&acc, &tmp);
        }

        debug_assert!(acc.pack() != Scalar::ZERO);

        acc = acc.montgomery_invert().from_montgomery();

        let ret = acc.pack();

        for (input, scratch) in inputs.iter_mut().rev().zip(scratch.iter().rev()) {
            let tmp = UnpackedScalar::montgomery_mul(&acc, &input.unpack());
            *input = UnpackedScalar::montgomery_mul(&acc, scratch).pack();
            acc = tmp;
        }

        #[cfg(feature = "zeroize")]
        Zeroize::zeroize(&mut scratch);

        ret
    }

    pub(crate) fn bits_le(&self) -> impl DoubleEndedIterator<Item = bool> + '_ {
        (0..256).map(|i| ((self.bytes[i >> 3] >> (i & 7)) & 1u8) == 1)
    }

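    /// Compute a width-`w` "Non-Adjacent Form" of this scalar: 256 signed
    /// digits with `scalar = sum(naf[i] * 2^i)`, where each nonzero digit is
    /// odd and lies in `(-2^(w-1), 2^(w-1))`, and any `w` consecutive digits
    /// contain at most one nonzero entry. Requires `2 <= w <= 8`.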
    pub(crate) fn non_adjacent_form(&self, w: usize) -> [i8; 256] {
        debug_assert!(w >= 2);
        debug_assert!(w <= 8);

        let mut naf = [0i8; 256];

        let mut x_u64 = [0u64; 5];
        read_le_u64_into(&self.bytes, &mut x_u64[0..4]);

        let width = 1 << w;
        let window_mask = width - 1;

        let mut pos = 0;
        let mut carry = 0;
        while pos < 256 {
            let u64_idx = pos / 64;
            let bit_idx = pos % 64;
            let bit_buf: u64 = if bit_idx < 64 - w {
                x_u64[u64_idx] >> bit_idx
            } else {
                (x_u64[u64_idx] >> bit_idx) | (x_u64[1 + u64_idx] << (64 - bit_idx))
            };

            let window = carry + (bit_buf & window_mask);

            if window & 1 == 0 {
                pos += 1;
                continue;
            }

            if window < width / 2 {
                carry = 0;
                naf[pos] = window as i8;
            } else {
                carry = 1;
                naf[pos] = (window as i8).wrapping_sub(width as i8);
            }

            pos += w;
        }

        naf
    }

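    /// Write this scalar in radix 16 as 64 signed digits `a_i` with
    /// `a = a_0 + a_1*16 + ... + a_63*16^63` and `-8 <= a_i < 8` (the final
    /// digit may equal 8). The high bit of the scalar must be clear
    /// (`self[31] <= 127`), as checked by the `debug_assert!`.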
    pub(crate) fn as_radix_16(&self) -> [i8; 64] {
        debug_assert!(self[31] <= 127);
        let mut output = [0i8; 64];

        #[allow(clippy::identity_op)]
        #[inline(always)]
        fn bot_half(x: u8) -> u8 {
            (x >> 0) & 15
        }
        #[inline(always)]
        fn top_half(x: u8) -> u8 {
            (x >> 4) & 15
        }

        for i in 0..32 {
            output[2 * i] = bot_half(self[i]) as i8;
            output[2 * i + 1] = top_half(self[i]) as i8;
        }
        for i in 0..63 {
            let carry = (output[i] + 8) >> 4;
            output[i] -= carry << 4;
            output[i + 1] += carry;
        }
        output
    }

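    /// Number of radix-`2^w` digits produced by `as_radix_2w` for a window
    /// width `w` in `4..=8`; `w = 8` needs one extra digit for the final carry.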
    #[cfg(any(feature = "alloc", all(test, feature = "precomputed-tables")))]
    pub(crate) fn to_radix_2w_size_hint(w: usize) -> usize {
        debug_assert!(w >= 4);
        debug_assert!(w <= 8);

        let digits_count = match w {
            4..=7 => (256 + w - 1) / w,
            8 => (256 + w - 1) / w + 1_usize,
            _ => panic!("invalid radix parameter"),
        };

        debug_assert!(digits_count <= 64);
        digits_count
    }

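    /// Write this scalar in radix `2^w`, with signed digits in
    /// `[-2^(w-1), 2^(w-1)]`, for `w` in `4..=8`; this digit decomposition is
    /// used by the Pippenger multiscalar-multiplication code. For `w = 4` it
    /// simply defers to `as_radix_16`.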
    #[cfg(any(feature = "alloc", feature = "precomputed-tables"))]
    pub(crate) fn as_radix_2w(&self, w: usize) -> [i8; 64] {
        debug_assert!(w >= 4);
        debug_assert!(w <= 8);

        if w == 4 {
            return self.as_radix_16();
        }

        let mut scalar64x4 = [0u64; 4];
        read_le_u64_into(&self.bytes, &mut scalar64x4[0..4]);

        let radix: u64 = 1 << w;
        let window_mask: u64 = radix - 1;

        let mut carry = 0u64;
        let mut digits = [0i8; 64];
        let digits_count = (256 + w - 1) / w;
        #[allow(clippy::needless_range_loop)]
        for i in 0..digits_count {
            let bit_offset = i * w;
            let u64_idx = bit_offset / 64;
            let bit_idx = bit_offset % 64;

            let bit_buf: u64 = if bit_idx < 64 - w || u64_idx == 3 {
                scalar64x4[u64_idx] >> bit_idx
            } else {
                (scalar64x4[u64_idx] >> bit_idx) | (scalar64x4[1 + u64_idx] << (64 - bit_idx))
            };

            let coef = carry + (bit_buf & window_mask);
            carry = (coef + (radix / 2)) >> w;
            digits[i] = ((coef as i64) - (carry << w) as i64) as i8;
        }

        match w {
            8 => digits[digits_count] += carry as i8,
            _ => digits[digits_count - 1] += (carry << w) as i8,
        }

        digits
    }

    pub(crate) fn unpack(&self) -> UnpackedScalar {
        UnpackedScalar::from_bytes(&self.bytes)
    }

    #[allow(non_snake_case)]
    fn reduce(&self) -> Scalar {
        let x = self.unpack();
        let xR = UnpackedScalar::mul_internal(&x, &constants::R);
        let x_mod_l = UnpackedScalar::montgomery_reduce(&xR);
        x_mod_l.pack()
    }

    fn is_canonical(&self) -> Choice {
        self.ct_eq(&self.reduce())
    }
}

impl UnpackedScalar {
    fn pack(&self) -> Scalar {
        Scalar {
            bytes: self.to_bytes(),
        }
    }

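    /// Invert an `UnpackedScalar` in Montgomery form by raising it to the
    /// power ℓ - 2 (Fermat's little theorem), using the fixed
    /// square-and-multiply addition chain below.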
    #[rustfmt::skip]
    #[allow(clippy::just_underscores_and_digits)]
    pub fn montgomery_invert(&self) -> UnpackedScalar {
        let _1 = *self;
        let _10 = _1.montgomery_square();
        let _100 = _10.montgomery_square();
        let _11 = UnpackedScalar::montgomery_mul(&_10, &_1);
        let _101 = UnpackedScalar::montgomery_mul(&_10, &_11);
        let _111 = UnpackedScalar::montgomery_mul(&_10, &_101);
        let _1001 = UnpackedScalar::montgomery_mul(&_10, &_111);
        let _1011 = UnpackedScalar::montgomery_mul(&_10, &_1001);
        let _1111 = UnpackedScalar::montgomery_mul(&_100, &_1011);

        let mut y = UnpackedScalar::montgomery_mul(&_1111, &_1);

        #[inline]
        fn square_multiply(y: &mut UnpackedScalar, squarings: usize, x: &UnpackedScalar) {
            let mut i = 0;
            while i < squarings {
                *y = y.montgomery_square();
                i += 1;
            }
            *y = UnpackedScalar::montgomery_mul(y, x);
        }

        square_multiply(&mut y, 123 + 3, &_101);
        square_multiply(&mut y, 2 + 2, &_11);
        square_multiply(&mut y, 1 + 4, &_1111);
        square_multiply(&mut y, 1 + 4, &_1111);
        square_multiply(&mut y, 4, &_1001);
        square_multiply(&mut y, 2, &_11);
        square_multiply(&mut y, 1 + 4, &_1111);
        square_multiply(&mut y, 1 + 3, &_101);
        square_multiply(&mut y, 3 + 3, &_101);
        square_multiply(&mut y, 3, &_111);
        square_multiply(&mut y, 1 + 4, &_1111);
        square_multiply(&mut y, 2 + 3, &_111);
        square_multiply(&mut y, 2 + 2, &_11);
        square_multiply(&mut y, 1 + 4, &_1011);
        square_multiply(&mut y, 2 + 4, &_1011);
        square_multiply(&mut y, 6 + 4, &_1001);
        square_multiply(&mut y, 2 + 2, &_11);
        square_multiply(&mut y, 3 + 2, &_11);
        square_multiply(&mut y, 3 + 2, &_11);
        square_multiply(&mut y, 1 + 4, &_1001);
        square_multiply(&mut y, 1 + 3, &_111);
        square_multiply(&mut y, 2 + 4, &_1111);
        square_multiply(&mut y, 1 + 4, &_1011);
        square_multiply(&mut y, 3, &_101);
        square_multiply(&mut y, 2 + 4, &_1111);
        square_multiply(&mut y, 3, &_101);
        square_multiply(&mut y, 1 + 2, &_11);

        y
    }

    pub fn invert(&self) -> UnpackedScalar {
        self.as_montgomery().montgomery_invert().from_montgomery()
    }
}

#[cfg(feature = "group")]
impl Field for Scalar {
    const ZERO: Self = Self::ZERO;
    const ONE: Self = Self::ONE;

    fn random(mut rng: impl RngCore) -> Self {
        let mut scalar_bytes = [0u8; 64];
        rng.fill_bytes(&mut scalar_bytes);
        Self::from_bytes_mod_order_wide(&scalar_bytes)
    }

    fn square(&self) -> Self {
        self * self
    }

    fn double(&self) -> Self {
        self + self
    }

    fn invert(&self) -> CtOption<Self> {
        CtOption::new(self.invert(), !self.is_zero())
    }

    fn sqrt_ratio(num: &Self, div: &Self) -> (Choice, Self) {
        #[allow(unused_qualifications)]
        group::ff::helpers::sqrt_ratio_generic(num, div)
    }

    fn sqrt(&self) -> CtOption<Self> {
        #[allow(unused_qualifications)]
        group::ff::helpers::sqrt_tonelli_shanks(
            self,
            [
                0xcb02_4c63_4b9e_ba7d,
                0x029b_df3b_d45e_f39a,
                0x0000_0000_0000_0000,
                0x0200_0000_0000_0000,
            ],
        )
    }
}

#[cfg(feature = "group")]
impl PrimeField for Scalar {
    type Repr = [u8; 32];

    fn from_repr(repr: Self::Repr) -> CtOption<Self> {
        Self::from_canonical_bytes(repr)
    }

    fn from_repr_vartime(repr: Self::Repr) -> Option<Self> {
        if (repr[31] >> 7) != 0u8 {
            return None;
        }

        let candidate = Scalar { bytes: repr };

        if candidate == candidate.reduce() {
            Some(candidate)
        } else {
            None
        }
    }

    fn to_repr(&self) -> Self::Repr {
        self.to_bytes()
    }

    fn is_odd(&self) -> Choice {
        Choice::from(self.as_bytes()[0] & 1)
    }

    const MODULUS: &'static str =
        "0x1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed";
    const NUM_BITS: u32 = 253;
    const CAPACITY: u32 = 252;

    const TWO_INV: Self = Self {
        bytes: [
            0xf7, 0xe9, 0x7a, 0x2e, 0x8d, 0x31, 0x09, 0x2c, 0x6b, 0xce, 0x7b, 0x51, 0xef, 0x7c,
            0x6f, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x08,
        ],
    };
    const MULTIPLICATIVE_GENERATOR: Self = Self {
        bytes: [
            2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ],
    };
    const S: u32 = 2;
    const ROOT_OF_UNITY: Self = Self {
        bytes: [
            0xd4, 0x07, 0xbe, 0xeb, 0xdf, 0x75, 0x87, 0xbe, 0xfe, 0x83, 0xce, 0x42, 0x53, 0x56,
            0xf0, 0x0e, 0x7a, 0xc2, 0xc1, 0xab, 0x60, 0x6d, 0x3d, 0x7d, 0xe7, 0x81, 0x79, 0xe0,
            0x10, 0x73, 0x4a, 0x09,
        ],
    };
    const ROOT_OF_UNITY_INV: Self = Self {
        bytes: [
            0x19, 0xcc, 0x37, 0x71, 0x3a, 0xed, 0x8a, 0x99, 0xd7, 0x18, 0x29, 0x60, 0x8b, 0xa3,
            0xee, 0x05, 0x86, 0x3d, 0x3e, 0x54, 0x9f, 0x92, 0xc2, 0x82, 0x18, 0x7e, 0x86, 0x1f,
            0xef, 0x8c, 0xb5, 0x06,
        ],
    };
    const DELTA: Self = Self {
        bytes: [
            16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ],
    };
}

#[cfg(feature = "group-bits")]
impl PrimeFieldBits for Scalar {
    type ReprBits = [u8; 32];

    fn to_le_bits(&self) -> FieldBits<Self::ReprBits> {
        self.to_repr().into()
    }

    fn char_le_bits() -> FieldBits<Self::ReprBits> {
        constants::BASEPOINT_ORDER_PRIVATE.to_bytes().into()
    }
}

#[cfg(feature = "group")]
impl FromUniformBytes<64> for Scalar {
    fn from_uniform_bytes(bytes: &[u8; 64]) -> Self {
        Scalar::from_bytes_mod_order_wide(bytes)
    }
}

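/// Read one or more little-endian `u64`s out of `src`, filling `dst`.
///
/// # Panics
///
/// Panics if `src.len() != 8 * dst.len()`.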
fn read_le_u64_into(src: &[u8], dst: &mut [u64]) {
    assert!(src.len() == 8 * dst.len());
    let mut i = 0;
    while i < dst.len() {
        let start = i * 8;
        dst[i] = u64::from_le_bytes([
            src[start],
            src[start + 1],
            src[start + 2],
            src[start + 3],
            src[start + 4],
            src[start + 5],
            src[start + 6],
            src[start + 7],
        ]);
        i += 1;
    }
}

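/// Clamp a 256-bit little-endian integer as in X25519/Ed25519 secret-key
/// generation (RFC 7748 §5): clear the lowest three bits and the top bit, and
/// set the second-highest bit.
///
/// A small sketch of the guaranteed bit pattern (the doctest assumes this
/// module lives at `curve25519_dalek::scalar`):
///
/// ```
/// use curve25519_dalek::scalar::clamp_integer;
///
/// let c = clamp_integer([0xff; 32]);
/// assert_eq!(c[0] & 0b0000_0111, 0); // low three bits cleared
/// assert_eq!(c[31] & 0b1100_0000, 0b0100_0000); // top bit cleared, next bit set
/// ```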
#[must_use]
pub const fn clamp_integer(mut bytes: [u8; 32]) -> [u8; 32] {
    bytes[0] &= 0b1111_1000;
    bytes[31] &= 0b0111_1111;
    bytes[31] |= 0b0100_0000;
    bytes
}

#[cfg(test)]
pub(crate) mod test {
    use super::*;

    #[cfg(feature = "alloc")]
    use alloc::vec::Vec;

    pub static X: Scalar = Scalar {
        bytes: [
            0x4e, 0x5a, 0xb4, 0x34, 0x5d, 0x47, 0x08, 0x84, 0x59, 0x13, 0xb4, 0x64, 0x1b, 0xc2,
            0x7d, 0x52, 0x52, 0xa5, 0x85, 0x10, 0x1b, 0xcc, 0x42, 0x44, 0xd4, 0x49, 0xf4, 0xa8,
            0x79, 0xd9, 0xf2, 0x04,
        ],
    };
    pub static XINV: Scalar = Scalar {
        bytes: [
            0x1c, 0xdc, 0x17, 0xfc, 0xe0, 0xe9, 0xa5, 0xbb, 0xd9, 0x24, 0x7e, 0x56, 0xbb, 0x01,
            0x63, 0x47, 0xbb, 0xba, 0x31, 0xed, 0xd5, 0xa9, 0xbb, 0x96, 0xd5, 0x0b, 0xcd, 0x7a,
            0x3f, 0x96, 0x2a, 0x0f,
        ],
    };
    pub static Y: Scalar = Scalar {
        bytes: [
            0x90, 0x76, 0x33, 0xfe, 0x1c, 0x4b, 0x66, 0xa4, 0xa2, 0x8d, 0x2d, 0xd7, 0x67, 0x83,
            0x86, 0xc3, 0x53, 0xd0, 0xde, 0x54, 0x55, 0xd4, 0xfc, 0x9d, 0xe8, 0xef, 0x7a, 0xc3,
            0x1f, 0x35, 0xbb, 0x05,
        ],
    };

    pub(crate) static LARGEST_UNREDUCED_SCALAR: Scalar = Scalar {
        bytes: [
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0x7f,
        ],
    };

    static X_TIMES_Y: Scalar = Scalar {
        bytes: [
            0x6c, 0x33, 0x74, 0xa1, 0x89, 0x4f, 0x62, 0x21, 0x0a, 0xaa, 0x2f, 0xe1, 0x86, 0xa6,
            0xf9, 0x2c, 0xe0, 0xaa, 0x75, 0xc2, 0x77, 0x95, 0x81, 0xc2, 0x95, 0xfc, 0x08, 0x17,
            0x9a, 0x73, 0x94, 0x0c,
        ],
    };

    static CANONICAL_2_256_MINUS_1: Scalar = Scalar {
        bytes: [
            28, 149, 152, 141, 116, 49, 236, 214, 112, 207, 125, 115, 244, 91, 239, 198, 254, 255,
            255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 15,
        ],
    };

    static A_SCALAR: Scalar = Scalar {
        bytes: [
            0x1a, 0x0e, 0x97, 0x8a, 0x90, 0xf6, 0x62, 0x2d, 0x37, 0x47, 0x02, 0x3f, 0x8a, 0xd8,
            0x26, 0x4d, 0xa7, 0x58, 0xaa, 0x1b, 0x88, 0xe0, 0x40, 0xd1, 0x58, 0x9e, 0x7b, 0x7f,
            0x23, 0x76, 0xef, 0x09,
        ],
    };

    static A_NAF: [i8; 256] = [
        0, 13, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, -9, 0, 0, 0, 0, -11, 0, 0, 0, 0, 3, 0, 0,
        0, 0, 1, 0, 0, 0, 0, 9, 0, 0, 0, 0, -5, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 11, 0, 0, 0, 0,
        11, 0, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, -3, 0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
        0, -1, 0, 0, 0, 0, 0, 9, 0, 0, 0, 0, -15, 0, 0, 0, 0, -7, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, 5,
        0, 0, 0, 0, 13, 0, 0, 0, 0, 0, -3, 0, 0, 0, 0, -11, 0, 0, 0, 0, -7, 0, 0, 0, 0, -13, 0, 0,
        0, 0, 11, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, -15, 0, 0, 0, 0, 1, 0, 0, 0, 0,
        7, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 15,
        0, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, -15, 0,
        0, 0, 0, 0, 15, 0, 0, 0, 0, 15, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
    ];

    const BASEPOINT_ORDER_MINUS_ONE: Scalar = Scalar {
        bytes: [
            0xec, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58, 0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9,
            0xde, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x10,
        ],
    };

    static LARGEST_CLAMPED_INTEGER: [u8; 32] = clamp_integer(LARGEST_UNREDUCED_SCALAR.bytes);

    #[test]
    fn fuzzer_testcase_reduction() {
        let a_bytes = [
            255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
            255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        ];
        let b_bytes = [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 210, 210,
            210, 255, 255, 255, 255, 10,
        ];
        let c_bytes = [
            134, 171, 119, 216, 180, 128, 178, 62, 171, 132, 32, 62, 34, 119, 104, 193, 47, 215,
            181, 250, 14, 207, 172, 93, 75, 207, 211, 103, 144, 204, 56, 14,
        ];

        let a = Scalar::from_bytes_mod_order(a_bytes);
        let b = Scalar::from_bytes_mod_order(b_bytes);
        let c = Scalar::from_bytes_mod_order(c_bytes);

        let mut tmp = [0u8; 64];

        tmp[0..32].copy_from_slice(&a_bytes[..]);
        let also_a = Scalar::from_bytes_mod_order_wide(&tmp);

        tmp[0..32].copy_from_slice(&b_bytes[..]);
        let also_b = Scalar::from_bytes_mod_order_wide(&tmp);

        let expected_c = a * b;
        let also_expected_c = also_a * also_b;

        assert_eq!(c, expected_c);
        assert_eq!(c, also_expected_c);
    }

    #[test]
    fn non_adjacent_form_test_vector() {
        let naf = A_SCALAR.non_adjacent_form(5);
        for i in 0..256 {
            assert_eq!(naf[i], A_NAF[i]);
        }
    }

    fn non_adjacent_form_iter(w: usize, x: &Scalar) {
        let naf = x.non_adjacent_form(w);

        let mut y = Scalar::ZERO;
        for i in (0..256).rev() {
            y += y;
            let digit = if naf[i] < 0 {
                -Scalar::from((-naf[i]) as u64)
            } else {
                Scalar::from(naf[i] as u64)
            };
            y += digit;
        }

        assert_eq!(*x, y);
    }

    #[test]
    fn non_adjacent_form_random() {
        let mut rng = rand::thread_rng();
        for _ in 0..1_000 {
            let x = Scalar::random(&mut rng);
            for w in &[5, 6, 7, 8] {
                non_adjacent_form_iter(*w, &x);
            }
        }
    }

    #[test]
    fn from_u64() {
        let val: u64 = 0xdeadbeefdeadbeef;
        let s = Scalar::from(val);
        assert_eq!(s[7], 0xde);
        assert_eq!(s[6], 0xad);
        assert_eq!(s[5], 0xbe);
        assert_eq!(s[4], 0xef);
        assert_eq!(s[3], 0xde);
        assert_eq!(s[2], 0xad);
        assert_eq!(s[1], 0xbe);
        assert_eq!(s[0], 0xef);
    }

    #[test]
    fn scalar_mul_by_one() {
        let test_scalar = X * Scalar::ONE;
        for i in 0..32 {
            assert!(test_scalar[i] == X[i]);
        }
    }

    #[test]
    fn add_reduces() {
        assert_eq!(BASEPOINT_ORDER_MINUS_ONE + Scalar::ONE, Scalar::ZERO);
    }

    #[test]
    fn sub_reduces() {
        assert_eq!(Scalar::ZERO - Scalar::ONE, BASEPOINT_ORDER_MINUS_ONE);
    }

    #[test]
    fn impl_add() {
        let two = Scalar::from(2u64);
        let one = Scalar::ONE;
        let should_be_two = one + one;
        assert_eq!(should_be_two, two);
    }

    #[allow(non_snake_case)]
    #[test]
    fn impl_mul() {
        let should_be_X_times_Y = X * Y;
        assert_eq!(should_be_X_times_Y, X_TIMES_Y);
    }

    #[allow(non_snake_case)]
    #[test]
    #[cfg(feature = "alloc")]
    fn impl_product() {
        let X_Y_vector = [X, Y];
        let should_be_X_times_Y: Scalar = X_Y_vector.iter().product();
        assert_eq!(should_be_X_times_Y, X_TIMES_Y);

        let one = Scalar::ONE;
        let empty_vector = [];
        let should_be_one: Scalar = empty_vector.iter().product();
        assert_eq!(should_be_one, one);

        let xs = [Scalar::from(2u64); 10];
        let ys = [Scalar::from(3u64); 10];
        let zs = xs.iter().zip(ys.iter()).map(|(x, y)| x * y);

        let x_prod: Scalar = xs.iter().product();
        let y_prod: Scalar = ys.iter().product();
        let z_prod: Scalar = zs.product();

        assert_eq!(x_prod, Scalar::from(1024u64));
        assert_eq!(y_prod, Scalar::from(59049u64));
        assert_eq!(z_prod, Scalar::from(60466176u64));
        assert_eq!(x_prod * y_prod, z_prod);
    }

    #[test]
    #[cfg(feature = "alloc")]
    fn impl_sum() {
        let two = Scalar::from(2u64);
        let one_vector = [Scalar::ONE, Scalar::ONE];
        let should_be_two: Scalar = one_vector.iter().sum();
        assert_eq!(should_be_two, two);

        let zero = Scalar::ZERO;
        let empty_vector = [];
        let should_be_zero: Scalar = empty_vector.iter().sum();
        assert_eq!(should_be_zero, zero);

        let xs = [Scalar::from(1u64); 10];
        let ys = [Scalar::from(2u64); 10];
        let zs = xs.iter().zip(ys.iter()).map(|(x, y)| x + y);

        let x_sum: Scalar = xs.iter().sum();
        let y_sum: Scalar = ys.iter().sum();
        let z_sum: Scalar = zs.sum();

        assert_eq!(x_sum, Scalar::from(10u64));
        assert_eq!(y_sum, Scalar::from(20u64));
        assert_eq!(z_sum, Scalar::from(30u64));
        assert_eq!(x_sum + y_sum, z_sum);
    }

    #[test]
    fn square() {
        let expected = X * X;
        let actual = X.unpack().square().pack();
        for i in 0..32 {
            assert!(expected[i] == actual[i]);
        }
    }

    #[test]
    fn reduce() {
        let biggest = Scalar::from_bytes_mod_order([0xff; 32]);
        assert_eq!(biggest, CANONICAL_2_256_MINUS_1);
    }

    #[test]
    fn from_bytes_mod_order_wide() {
        let mut bignum = [0u8; 64];
        for i in 0..32 {
            bignum[i] = X[i];
            bignum[32 + i] = X[i];
        }
        let reduced = Scalar {
            bytes: [
                216, 154, 179, 139, 210, 121, 2, 71, 69, 99, 158, 216, 23, 173, 63, 100, 204, 0,
                91, 50, 219, 153, 57, 249, 28, 82, 31, 197, 100, 165, 192, 8,
            ],
        };
        let test_red = Scalar::from_bytes_mod_order_wide(&bignum);
        for i in 0..32 {
            assert!(test_red[i] == reduced[i]);
        }
    }

    #[allow(non_snake_case)]
    #[test]
    fn invert() {
        let inv_X = X.invert();
        assert_eq!(inv_X, XINV);
        let should_be_one = inv_X * X;
        assert_eq!(should_be_one, Scalar::ONE);
    }

    #[allow(non_snake_case)]
    #[test]
    fn neg_twice_is_identity() {
        let negative_X = -&X;
        let should_be_X = -&negative_X;

        assert_eq!(should_be_X, X);
    }

    #[test]
    fn to_bytes_from_bytes_roundtrips() {
        let unpacked = X.unpack();
        let bytes = unpacked.to_bytes();
        let should_be_unpacked = UnpackedScalar::from_bytes(&bytes);

        assert_eq!(should_be_unpacked.0, unpacked.0);
    }

    #[test]
    fn montgomery_reduce_matches_from_bytes_mod_order_wide() {
        let mut bignum = [0u8; 64];

        for i in 0..32 {
            bignum[i] = X[i];
            bignum[32 + i] = X[i];
        }
        let expected = Scalar {
            bytes: [
                216, 154, 179, 139, 210, 121, 2, 71, 69, 99, 158, 216, 23, 173, 63, 100, 204, 0,
                91, 50, 219, 153, 57, 249, 28, 82, 31, 197, 100, 165, 192, 8,
            ],
        };
        let reduced = Scalar::from_bytes_mod_order_wide(&bignum);

        assert_eq!(reduced.bytes, expected.bytes);

        let interim =
            UnpackedScalar::mul_internal(&UnpackedScalar::from_bytes_wide(&bignum), &constants::R);
        let montgomery_reduced = UnpackedScalar::montgomery_reduce(&interim);

        assert_eq!(montgomery_reduced.0, reduced.unpack().0);
        assert_eq!(montgomery_reduced.0, expected.unpack().0)
    }

    #[test]
    fn canonical_decoding() {
        let canonical_bytes = [
            99, 99, 99, 99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0,
        ];

        let non_canonical_bytes_because_unreduced = [16; 32];

        let non_canonical_bytes_because_highbit = [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 128,
        ];

        assert!(bool::from(
            Scalar::from_canonical_bytes(canonical_bytes).is_some()
        ));
        assert!(bool::from(
            Scalar::from_canonical_bytes(non_canonical_bytes_because_unreduced).is_none()
        ));
        assert!(bool::from(
            Scalar::from_canonical_bytes(non_canonical_bytes_because_highbit).is_none()
        ));
    }

    #[test]
    #[cfg(feature = "serde")]
    fn serde_bincode_scalar_roundtrip() {
        use bincode;
        let encoded = bincode::serialize(&X).unwrap();
        let parsed: Scalar = bincode::deserialize(&encoded).unwrap();
        assert_eq!(parsed, X);

        assert_eq!(encoded.len(), 32);

        assert_eq!(X, bincode::deserialize(X.as_bytes()).unwrap());
    }

    #[cfg(all(debug_assertions, feature = "alloc"))]
    #[test]
    #[should_panic]
    fn batch_invert_with_a_zero_input_panics() {
        let mut xs = vec![Scalar::ONE; 16];
        xs[3] = Scalar::ZERO;
        Scalar::batch_invert(&mut xs);
    }

    #[test]
    #[cfg(feature = "alloc")]
    fn batch_invert_empty() {
        assert_eq!(Scalar::ONE, Scalar::batch_invert(&mut []));
    }

    #[test]
    #[cfg(feature = "alloc")]
    fn batch_invert_consistency() {
        let mut x = Scalar::from(1u64);
        let mut v1: Vec<_> = (0..16)
            .map(|_| {
                let tmp = x;
                x = x + x;
                tmp
            })
            .collect();
        let v2 = v1.clone();

        let expected: Scalar = v1.iter().product();
        let expected = expected.invert();
        let ret = Scalar::batch_invert(&mut v1);
        assert_eq!(ret, expected);

        for (a, b) in v1.iter().zip(v2.iter()) {
            assert_eq!(a * b, Scalar::ONE);
        }
    }

    #[cfg(feature = "precomputed-tables")]
    fn test_pippenger_radix_iter(scalar: Scalar, w: usize) {
        let digits_count = Scalar::to_radix_2w_size_hint(w);
        let digits = scalar.as_radix_2w(w);

        let radix = Scalar::from((1 << w) as u64);
        let mut term = Scalar::ONE;
        let mut recovered_scalar = Scalar::ZERO;
        for digit in &digits[0..digits_count] {
            let digit = *digit;
            if digit != 0 {
                let sdigit = if digit < 0 {
                    -Scalar::from((-(digit as i64)) as u64)
                } else {
                    Scalar::from(digit as u64)
                };
                recovered_scalar += term * sdigit;
            }
            term *= radix;
        }
        assert_eq!(recovered_scalar, scalar.reduce());
    }

    #[test]
    #[cfg(feature = "precomputed-tables")]
    fn test_pippenger_radix() {
        use core::iter;
        let cases = (2..100)
            .map(|s| Scalar::from(s as u64).invert())
            .chain(iter::once(LARGEST_UNREDUCED_SCALAR));

        for scalar in cases {
            test_pippenger_radix_iter(scalar, 6);
            test_pippenger_radix_iter(scalar, 7);
            test_pippenger_radix_iter(scalar, 8);
        }
    }

    #[test]
    #[cfg(feature = "alloc")]
    fn test_read_le_u64_into() {
        let cases: &[(&[u8], &[u64])] = &[
            (
                &[0xFE, 0xEF, 0x10, 0x01, 0x1F, 0xF1, 0x0F, 0xF0],
                &[0xF00F_F11F_0110_EFFE],
            ),
            (
                &[
                    0xFE, 0xEF, 0x10, 0x01, 0x1F, 0xF1, 0x0F, 0xF0, 0x12, 0x34, 0x56, 0x78, 0x9A,
                    0xBC, 0xDE, 0xF0,
                ],
                &[0xF00F_F11F_0110_EFFE, 0xF0DE_BC9A_7856_3412],
            ),
        ];

        for (src, expected) in cases {
            let mut dst = vec![0; expected.len()];
            read_le_u64_into(src, &mut dst);

            assert_eq!(&dst, expected, "Expected {:x?} got {:x?}", expected, dst);
        }
    }

    #[test]
    fn test_scalar_from_int() {
        let s1 = Scalar::ONE;

        let x = 0x23u8;
        let sx = Scalar::from(x);
        assert_eq!(sx + s1, Scalar::from(x + 1));

        let x = 0x2323u16;
        let sx = Scalar::from(x);
        assert_eq!(sx + s1, Scalar::from(x + 1));

        let x = 0x2323_2323u32;
        let sx = Scalar::from(x);
        assert_eq!(sx + s1, Scalar::from(x + 1));

        let x = 0x2323_2323_2323_2323u64;
        let sx = Scalar::from(x);
        assert_eq!(sx + s1, Scalar::from(x + 1));

        let x = 0x2323_2323_2323_2323_2323_2323_2323_2323u128;
        let sx = Scalar::from(x);
        assert_eq!(sx + s1, Scalar::from(x + 1));
    }

    #[cfg(feature = "group")]
    #[test]
    fn ff_constants() {
        assert_eq!(Scalar::from(2u64) * Scalar::TWO_INV, Scalar::ONE);

        assert_eq!(
            Scalar::ROOT_OF_UNITY * Scalar::ROOT_OF_UNITY_INV,
            Scalar::ONE,
        );

        assert_eq!(
            Scalar::ROOT_OF_UNITY.pow(&[1u64 << Scalar::S, 0, 0, 0]),
            Scalar::ONE,
        );

        assert_eq!(
            Scalar::DELTA.pow(&[
                0x9604_98c6_973d_74fb,
                0x0537_be77_a8bd_e735,
                0x0000_0000_0000_0000,
                0x0400_0000_0000_0000,
            ]),
            Scalar::ONE,
        );
    }

    #[cfg(feature = "group")]
    #[test]
    fn ff_impls() {
        assert!(bool::from(Scalar::ZERO.is_even()));
        assert!(bool::from(Scalar::ONE.is_odd()));
        assert!(bool::from(Scalar::from(2u64).is_even()));
        assert!(bool::from(Scalar::DELTA.is_even()));

        assert!(bool::from(Field::invert(&Scalar::ZERO).is_none()));
        assert_eq!(Field::invert(&X).unwrap(), XINV);

        let x_sq = X.square();
        assert!([X, -X].contains(&x_sq.sqrt().unwrap()));

        assert_eq!(Scalar::from_repr_vartime(X.to_repr()), Some(X));
        assert_eq!(Scalar::from_repr_vartime([0xff; 32]), None);

        assert_eq!(Scalar::from_repr(X.to_repr()).unwrap(), X);
        assert!(bool::from(Scalar::from_repr([0xff; 32]).is_none()));
    }

    #[test]
    #[should_panic]
    fn test_read_le_u64_into_should_panic_on_bad_input() {
        let mut dst = [0_u64; 1];
        read_le_u64_into(&[0xFE, 0xEF, 0x10, 0x01, 0x1F, 0xF1, 0x0F], &mut dst);
    }

    #[test]
    fn test_scalar_clamp() {
        let input = A_SCALAR.bytes;
        let expected = [
            0x18, 0x0e, 0x97, 0x8a, 0x90, 0xf6, 0x62, 0x2d, 0x37, 0x47, 0x02, 0x3f, 0x8a, 0xd8,
            0x26, 0x4d, 0xa7, 0x58, 0xaa, 0x1b, 0x88, 0xe0, 0x40, 0xd1, 0x58, 0x9e, 0x7b, 0x7f,
            0x23, 0x76, 0xef, 0x49,
        ];
        let actual = clamp_integer(input);
        assert_eq!(actual, expected);

        let expected = [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0x40,
        ];
        let actual = clamp_integer([0; 32]);
        assert_eq!(expected, actual);
        let expected = [
            0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0x7f,
        ];
        let actual = clamp_integer([0xff; 32]);
        assert_eq!(actual, expected);

        assert_eq!(
            LARGEST_CLAMPED_INTEGER,
            clamp_integer(LARGEST_CLAMPED_INTEGER)
        );
    }

    #[test]
    fn test_mul_reduction_invariance() {
        let mut rng = rand::thread_rng();

        for _ in 0..10 {
            let (a, b, c) = {
                let mut a_bytes = [0u8; 32];
                let mut b_bytes = [0u8; 32];
                let mut c_bytes = [0u8; 32];
                rng.fill_bytes(&mut a_bytes);
                rng.fill_bytes(&mut b_bytes);
                rng.fill_bytes(&mut c_bytes);
                (
                    Scalar { bytes: a_bytes },
                    Scalar { bytes: b_bytes },
                    Scalar {
                        bytes: clamp_integer(c_bytes),
                    },
                )
            };

            let reduced_mul_ab = a.reduce() * b.reduce();
            let reduced_mul_ac = a.reduce() * c.reduce();
            assert_eq!(a * b, reduced_mul_ab);
            assert_eq!(a.reduce() * b, reduced_mul_ab);
            assert_eq!(a * b.reduce(), reduced_mul_ab);
            assert_eq!(a * c, reduced_mul_ac);
            assert_eq!(a.reduce() * c, reduced_mul_ac);
            assert_eq!(a * c.reduce(), reduced_mul_ac);
        }
    }
}