1#![cfg_attr(feature = "digest", doc = "```")]
92#![cfg_attr(not(feature = "digest"), doc = "```ignore")]
93use core::borrow::Borrow;
115use core::fmt::Debug;
116use core::iter::{Product, Sum};
117use core::ops::Index;
118use core::ops::Neg;
119use core::ops::{Add, AddAssign};
120use core::ops::{Mul, MulAssign};
121use core::ops::{Sub, SubAssign};
122
123use cfg_if::cfg_if;
124
125#[cfg(feature = "group")]
126use group::ff::{Field, FromUniformBytes, PrimeField};
127#[cfg(feature = "group-bits")]
128use group::ff::{FieldBits, PrimeFieldBits};
129
130#[cfg(any(test, feature = "group"))]
131use rand_core::RngCore;
132
133#[cfg(any(test, feature = "rand_core"))]
134use rand_core::CryptoRngCore;
135
136#[cfg(feature = "digest")]
137use digest::generic_array::typenum::U64;
138#[cfg(feature = "digest")]
139use digest::Digest;
140
141use subtle::Choice;
142use subtle::ConditionallySelectable;
143use subtle::ConstantTimeEq;
144use subtle::CtOption;
145
146#[cfg(feature = "zeroize")]
147use zeroize::Zeroize;
148
149use crate::backend;
150use crate::constants;
151
152cfg_if! {
153 if #[cfg(curve25519_dalek_backend = "fiat")] {
154 #[cfg(curve25519_dalek_bits = "32")]
159 #[cfg_attr(
160 docsrs,
161 doc(cfg(all(feature = "fiat_backend", curve25519_dalek_bits = "32")))
162 )]
163 type UnpackedScalar = backend::serial::fiat_u32::scalar::Scalar29;
164
165 #[cfg(curve25519_dalek_bits = "64")]
170 #[cfg_attr(
171 docsrs,
172 doc(cfg(all(feature = "fiat_backend", curve25519_dalek_bits = "64")))
173 )]
174 type UnpackedScalar = backend::serial::fiat_u64::scalar::Scalar52;
175 } else if #[cfg(curve25519_dalek_bits = "64")] {
176 #[cfg_attr(docsrs, doc(cfg(curve25519_dalek_bits = "64")))]
181 type UnpackedScalar = backend::serial::u64::scalar::Scalar52;
182 } else {
183 #[cfg_attr(docsrs, doc(cfg(curve25519_dalek_bits = "64")))]
188 type UnpackedScalar = backend::serial::u32::scalar::Scalar29;
189 }
190}
191
/// An integer modulo the prime order ℓ of the Ristretto/Ed25519 basepoint
/// group, stored as 32 little-endian bytes.
///
/// Arithmetic (`Add`, `Sub`, `Mul`, `Neg`, `invert`, …) is performed modulo ℓ
/// via the packed/unpacked conversions below. The canonical-encoding invariant
/// is maintained by the constructors (`from_bytes_mod_order`,
/// `from_canonical_bytes`); `from_bits` is the deprecated escape hatch.
// `Hash` is derived over the raw bytes while `PartialEq` is a manual
// constant-time comparison; the two agree because equal scalars have equal
// canonical byte encodings, hence the clippy allow.
#[allow(clippy::derived_hash_with_manual_eq)]
#[derive(Copy, Clone, Hash)]
pub struct Scalar {
    /// The little-endian byte encoding of this scalar.
    pub(crate) bytes: [u8; 32],
}
233
impl Scalar {
    /// Construct a `Scalar` by reducing a 256-bit little-endian integer
    /// modulo the group order ℓ.
    pub fn from_bytes_mod_order(bytes: [u8; 32]) -> Scalar {
        // Temporarily allow s_unreduced.bytes > 2^255 ...
        let s_unreduced = Scalar { bytes };

        // Then reduce mod ℓ; the result's high bit must be clear.
        let s = s_unreduced.reduce();
        debug_assert_eq!(0u8, s[31] >> 7);

        s
    }

    /// Construct a `Scalar` by reducing a 512-bit little-endian integer
    /// modulo the group order ℓ. Useful for uniform sampling and hashing.
    pub fn from_bytes_mod_order_wide(input: &[u8; 64]) -> Scalar {
        UnpackedScalar::from_bytes_wide(input).pack()
    }

    /// Attempt to construct a `Scalar` from a canonical byte representation.
    ///
    /// Returns `Some` iff `bytes` has its high bit unset AND is already fully
    /// reduced mod ℓ (i.e. it round-trips through `reduce()` unchanged).
    /// The check is constant-time via `CtOption`.
    pub fn from_canonical_bytes(bytes: [u8; 32]) -> CtOption<Scalar> {
        let high_bit_unset = (bytes[31] >> 7).ct_eq(&0);
        let candidate = Scalar { bytes };
        CtOption::new(candidate, high_bit_unset & candidate.is_canonical())
    }

    /// Construct a `Scalar` from the low 255 bits of a 256-bit integer,
    /// WITHOUT reducing mod ℓ.
    ///
    /// The result may be non-canonical, which is why scalar-scalar arithmetic
    /// on it is undefined (see the deprecation note).
    #[cfg(feature = "legacy_compatibility")]
    #[deprecated(
        since = "4.0.0",
        note = "This constructor outputs scalars with undefined scalar-scalar arithmetic. See docs."
    )]
    pub const fn from_bits(bytes: [u8; 32]) -> Scalar {
        let mut s = Scalar { bytes };
        // Ensure invariant #1 holds: clear the high bit so s < 2^255.
        s.bytes[31] &= 0b0111_1111;

        s
    }
}
286
287impl Debug for Scalar {
288 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
289 write!(f, "Scalar{{\n\tbytes: {:?},\n}}", &self.bytes)
290 }
291}
292
293impl Eq for Scalar {}
294impl PartialEq for Scalar {
295 fn eq(&self, other: &Self) -> bool {
296 self.ct_eq(other).into()
297 }
298}
299
impl ConstantTimeEq for Scalar {
    /// Constant-time byte-wise equality of the two encodings.
    fn ct_eq(&self, other: &Self) -> Choice {
        self.bytes.ct_eq(&other.bytes)
    }
}
305
306impl Index<usize> for Scalar {
307 type Output = u8;
308
309 fn index(&self, _index: usize) -> &u8 {
311 &(self.bytes[_index])
312 }
313}
314
315impl<'a> MulAssign<&'a Scalar> for Scalar {
316 fn mul_assign(&mut self, _rhs: &'a Scalar) {
317 *self = UnpackedScalar::mul(&self.unpack(), &_rhs.unpack()).pack();
318 }
319}
320
321define_mul_assign_variants!(LHS = Scalar, RHS = Scalar);
322
323impl<'a> Mul<&'a Scalar> for &Scalar {
324 type Output = Scalar;
325 fn mul(self, _rhs: &'a Scalar) -> Scalar {
326 UnpackedScalar::mul(&self.unpack(), &_rhs.unpack()).pack()
327 }
328}
329
330define_mul_variants!(LHS = Scalar, RHS = Scalar, Output = Scalar);
331
332impl<'a> AddAssign<&'a Scalar> for Scalar {
333 fn add_assign(&mut self, _rhs: &'a Scalar) {
334 *self = *self + _rhs;
335 }
336}
337
338define_add_assign_variants!(LHS = Scalar, RHS = Scalar);
339
340impl<'a> Add<&'a Scalar> for &Scalar {
341 type Output = Scalar;
342 #[allow(non_snake_case)]
343 fn add(self, _rhs: &'a Scalar) -> Scalar {
344 UnpackedScalar::add(&self.unpack(), &_rhs.unpack()).pack()
347 }
348}
349
350define_add_variants!(LHS = Scalar, RHS = Scalar, Output = Scalar);
351
352impl<'a> SubAssign<&'a Scalar> for Scalar {
353 fn sub_assign(&mut self, _rhs: &'a Scalar) {
354 *self = *self - _rhs;
355 }
356}
357
358define_sub_assign_variants!(LHS = Scalar, RHS = Scalar);
359
360impl<'a> Sub<&'a Scalar> for &Scalar {
361 type Output = Scalar;
362 #[allow(non_snake_case)]
363 fn sub(self, rhs: &'a Scalar) -> Scalar {
364 UnpackedScalar::sub(&self.unpack(), &rhs.unpack()).pack()
367 }
368}
369
370define_sub_variants!(LHS = Scalar, RHS = Scalar, Output = Scalar);
371
impl Neg for &Scalar {
    type Output = Scalar;
    /// Negation mod ℓ.
    ///
    /// First fully reduces `self` mod ℓ (multiply by the Montgomery constant
    /// R, then Montgomery-reduce — together a plain reduction), then subtracts
    /// from zero. The pre-reduction step matters for possibly-unreduced
    /// inputs (e.g. from the deprecated `from_bits`).
    #[allow(non_snake_case)]
    fn neg(self) -> Scalar {
        let self_R = UnpackedScalar::mul_internal(&self.unpack(), &constants::R);
        let self_mod_l = UnpackedScalar::montgomery_reduce(&self_R);
        UnpackedScalar::sub(&UnpackedScalar::ZERO, &self_mod_l).pack()
    }
}
381
382impl Neg for Scalar {
383 type Output = Scalar;
384 fn neg(self) -> Scalar {
385 -&self
386 }
387}
388
impl ConditionallySelectable for Scalar {
    /// Constant-time selection: returns `a` if `choice == 0`, `b` if
    /// `choice == 1`, without branching on `choice`.
    fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
        let mut bytes = [0u8; 32];
        // Select byte-by-byte; the explicit index loop is kept deliberately
        // so each iteration is a fixed-shape u8 select.
        #[allow(clippy::needless_range_loop)]
        for i in 0..32 {
            bytes[i] = u8::conditional_select(&a.bytes[i], &b.bytes[i], choice);
        }
        Scalar { bytes }
    }
}
399
400#[cfg(feature = "serde")]
401use serde::de::Visitor;
402#[cfg(feature = "serde")]
403use serde::{Deserialize, Deserializer, Serialize, Serializer};
404
#[cfg(feature = "serde")]
#[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
impl Serialize for Scalar {
    /// Serialize as a fixed-size tuple of 32 bytes (not a length-prefixed
    /// sequence), so e.g. bincode encodes exactly 32 bytes.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        use serde::ser::SerializeTuple;
        let mut tuple = serializer.serialize_tuple(32)?;
        for byte in &self.bytes {
            tuple.serialize_element(byte)?;
        }
        tuple.end()
    }
}
420
#[cfg(feature = "serde")]
#[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
impl<'de> Deserialize<'de> for Scalar {
    /// Deserialize from a 32-element tuple of bytes, rejecting any encoding
    /// that is not canonical (see `from_canonical_bytes`).
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        struct ScalarVisitor;

        impl<'de> Visitor<'de> for ScalarVisitor {
            type Value = Scalar;

            fn expecting(&self, formatter: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
                formatter.write_str(
                    "a sequence of 32 bytes whose little-endian interpretation is less than the \
                    basepoint order ℓ",
                )
            }

            fn visit_seq<A>(self, mut seq: A) -> Result<Scalar, A::Error>
            where
                A: serde::de::SeqAccess<'de>,
            {
                let mut bytes = [0u8; 32];
                // Pull exactly 32 elements; a short sequence is a length error.
                #[allow(clippy::needless_range_loop)]
                for i in 0..32 {
                    bytes[i] = seq
                        .next_element()?
                        .ok_or_else(|| serde::de::Error::invalid_length(i, &"expected 32 bytes"))?;
                }
                // Reject non-canonical encodings rather than silently reducing.
                Option::from(Scalar::from_canonical_bytes(bytes))
                    .ok_or_else(|| serde::de::Error::custom("scalar was not canonically encoded"))
            }
        }

        deserializer.deserialize_tuple(32, ScalarVisitor)
    }
}
459
460impl<T> Product<T> for Scalar
461where
462 T: Borrow<Scalar>,
463{
464 fn product<I>(iter: I) -> Self
465 where
466 I: Iterator<Item = T>,
467 {
468 iter.fold(Scalar::ONE, |acc, item| acc * item.borrow())
469 }
470}
471
472impl<T> Sum<T> for Scalar
473where
474 T: Borrow<Scalar>,
475{
476 fn sum<I>(iter: I) -> Self
477 where
478 I: Iterator<Item = T>,
479 {
480 iter.fold(Scalar::ZERO, |acc, item| acc + item.borrow())
481 }
482}
483
484impl Default for Scalar {
485 fn default() -> Scalar {
486 Scalar::ZERO
487 }
488}
489
490impl From<u8> for Scalar {
491 fn from(x: u8) -> Scalar {
492 let mut s_bytes = [0u8; 32];
493 s_bytes[0] = x;
494 Scalar { bytes: s_bytes }
495 }
496}
497
498impl From<u16> for Scalar {
499 fn from(x: u16) -> Scalar {
500 let mut s_bytes = [0u8; 32];
501 let x_bytes = x.to_le_bytes();
502 s_bytes[0..x_bytes.len()].copy_from_slice(&x_bytes);
503 Scalar { bytes: s_bytes }
504 }
505}
506
507impl From<u32> for Scalar {
508 fn from(x: u32) -> Scalar {
509 let mut s_bytes = [0u8; 32];
510 let x_bytes = x.to_le_bytes();
511 s_bytes[0..x_bytes.len()].copy_from_slice(&x_bytes);
512 Scalar { bytes: s_bytes }
513 }
514}
515
516impl From<u64> for Scalar {
517 fn from(x: u64) -> Scalar {
539 let mut s_bytes = [0u8; 32];
540 let x_bytes = x.to_le_bytes();
541 s_bytes[0..x_bytes.len()].copy_from_slice(&x_bytes);
542 Scalar { bytes: s_bytes }
543 }
544}
545
546impl From<u128> for Scalar {
547 fn from(x: u128) -> Scalar {
548 let mut s_bytes = [0u8; 32];
549 let x_bytes = x.to_le_bytes();
550 s_bytes[0..x_bytes.len()].copy_from_slice(&x_bytes);
551 Scalar { bytes: s_bytes }
552 }
553}
554
#[cfg(feature = "zeroize")]
impl Zeroize for Scalar {
    /// Securely clear the scalar's bytes from memory.
    fn zeroize(&mut self) {
        self.bytes.zeroize();
    }
}
561
562impl Scalar {
    /// The scalar 0.
    pub const ZERO: Self = Self { bytes: [0u8; 32] };

    /// The scalar 1, little-endian encoded.
    pub const ONE: Self = Self {
        bytes: [
            1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ],
    };
573
574 #[cfg(any(test, feature = "rand_core"))]
575 pub fn random<R: CryptoRngCore + ?Sized>(rng: &mut R) -> Self {
598 let mut scalar_bytes = [0u8; 64];
599 rng.fill_bytes(&mut scalar_bytes);
600 Scalar::from_bytes_mod_order_wide(&scalar_bytes)
601 }
602
603 #[cfg(feature = "digest")]
604 #[cfg_attr(feature = "digest", doc = "```")]
614 #[cfg_attr(not(feature = "digest"), doc = "```ignore")]
615 pub fn hash_from_bytes<D>(input: &[u8]) -> Scalar
626 where
627 D: Digest<OutputSize = U64> + Default,
628 {
629 let mut hash = D::default();
630 hash.update(input);
631 Scalar::from_hash(hash)
632 }
633
634 #[cfg(feature = "digest")]
635 pub fn from_hash<D>(hash: D) -> Scalar
672 where
673 D: Digest<OutputSize = U64>,
674 {
675 let mut output = [0u8; 64];
676 output.copy_from_slice(hash.finalize().as_slice());
677 Scalar::from_bytes_mod_order_wide(&output)
678 }
679
    /// Return this scalar's 32-byte little-endian encoding by value.
    pub const fn to_bytes(&self) -> [u8; 32] {
        self.bytes
    }
694
    /// View this scalar's 32-byte little-endian encoding without copying.
    pub const fn as_bytes(&self) -> &[u8; 32] {
        &self.bytes
    }
709
    /// Compute the multiplicative inverse of this scalar mod ℓ.
    ///
    /// NOTE(review): the zero scalar has no inverse; callers needing a checked
    /// inverse should use the `ff::Field::invert` impl, which returns
    /// `CtOption` gated on `is_zero`.
    pub fn invert(&self) -> Scalar {
        self.unpack().invert().pack()
    }
750
    /// Invert every scalar in `inputs` in place and return the product of all
    /// the inverses, using Montgomery's batch-inversion trick: one field
    /// inversion plus O(n) multiplications instead of n inversions.
    ///
    /// The pass structure: a forward pass records, for each index i, the
    /// running product of inputs[0..i] (in `scratch`), then a single inversion
    /// of the total product lets a backward pass peel off each individual
    /// inverse.
    #[cfg(feature = "alloc")]
    pub fn batch_invert(inputs: &mut [Scalar]) -> Scalar {
        let n = inputs.len();
        let one: UnpackedScalar = Scalar::ONE.unpack().as_montgomery();

        // scratch[i] will hold the product of inputs[0..i] (Montgomery form).
        let mut scratch = vec![one; n];

        // Running product of all inputs seen so far, Montgomery form.
        let mut acc = Scalar::ONE.unpack().as_montgomery();

        // Forward pass: record prefix products; also rewrite each input to its
        // Montgomery form (packed) so the backward pass can reuse it.
        let mut i = 0;
        while i < n {
            let input = &mut inputs[i];
            scratch[i] = acc;

            let tmp = input.unpack().as_montgomery();
            *input = tmp.pack();
            acc = UnpackedScalar::montgomery_mul(&acc, &tmp);
            i += 1;
        }

        // The trick only works if no input is zero (product must be invertible).
        debug_assert!(acc.pack() != Scalar::ZERO);

        // One inversion of the grand product; leave Montgomery form.
        acc = acc.montgomery_invert().from_montgomery();

        // Return value: product of all the inverses.
        let ret = acc.pack();

        // Backward pass: for each i, inverse(inputs[i]) =
        //   (product of inputs[0..i])^-1-free form:  acc * scratch[i],
        // then fold inputs[i] back into acc for the next (earlier) index.
        let mut i = n;
        while i > 0 {
            i -= 1;
            let input = &mut inputs[i];
            let scratch_val = &scratch[i];
            let tmp = UnpackedScalar::montgomery_mul(&acc, &input.unpack());
            *input = UnpackedScalar::montgomery_mul(&acc, scratch_val).pack();
            acc = tmp;
        }

        // Clear the prefix products — they leak information about the inputs.
        #[cfg(feature = "zeroize")]
        Zeroize::zeroize(&mut scratch);

        ret
    }
846
847 #[cfg(not(verify))]
849 #[allow(dead_code)]
850 pub(crate) fn bits_le(&self) -> impl DoubleEndedIterator<Item = bool> + '_ {
851 (0..256).map(|i| {
852 ((self.bytes[i >> 3] >> (i & 7)) & 1u8) == 1
856 })
857 }
858
    /// Compute the width-`w` non-adjacent form (NAF) of this scalar: 256
    /// signed digits, each either zero or odd with |digit| < 2^(w-1), with at
    /// least w-1 zeros between nonzero digits. Used for variable-time scalar
    /// multiplication.
    pub(crate) fn non_adjacent_form(&self, w: usize) -> [i8; 256] {
        // Digits must fit in an i8 and the window arithmetic below assumes
        // 2 <= w <= 8.
        debug_assert!(w >= 2);
        debug_assert!(w <= 8);

        let mut naf = [0i8; 256];

        // Five limbs: the 5th stays zero and exists so the cross-limb read
        // below never indexes out of bounds at the top of the scalar.
        let mut x_u64 = [0u64; 5];
        read_le_u64_into(&self.bytes, &mut x_u64[0..4]);

        let width = 1 << w;
        let window_mask = width - 1;

        let mut pos = 0;
        let mut carry = 0;
        while pos < 256 {
            // Locate the limb and bit offset of the current position.
            let u64_idx = pos / 64;
            let bit_idx = pos % 64;
            // Grab at least w bits starting at `pos`, stitching across the
            // limb boundary when the window straddles two limbs.
            let bit_buf: u64 = if bit_idx < 64 - w {
                x_u64[u64_idx] >> bit_idx
            } else {
                (x_u64[u64_idx] >> bit_idx) | (x_u64[1 + u64_idx] << (64 - bit_idx))
            };

            // Add the carry into the lowest w bits.
            let window = carry + (bit_buf & window_mask);

            if window & 1 == 0 {
                // Even window ⇒ emit a zero digit and advance one bit.
                pos += 1;
                continue;
            }

            if window < width / 2 {
                // Small odd window: emit as-is, no carry.
                carry = 0;
                naf[pos] = window as i8;
            } else {
                // Large odd window: emit window - 2^w (negative digit) and
                // carry 1 into the next window.
                carry = 1;
                naf[pos] = (window as i8).wrapping_sub(width as i8);
            }

            // A nonzero digit is followed by at least w-1 zeros.
            pos += w;
        }

        naf
    }
984
    /// Write this scalar as 64 signed radix-16 digits, each in [-8, 8), with
    /// `self = sum(digits[i] * 16^i)`. Requires the scalar to be < 2^255
    /// (high bit clear) so the final carry cannot overflow.
    pub(crate) fn as_radix_16(&self) -> [i8; 64] {
        debug_assert!(self[31] <= 127);
        let mut output = [0i8; 64];

        // Step 1: split each byte into its unsigned low and high nibbles.
        #[allow(clippy::identity_op)]
        #[inline(always)]
        fn bot_half(x: u8) -> u8 {
            (x >> 0) & 15
        }
        #[inline(always)]
        fn top_half(x: u8) -> u8 {
            (x >> 4) & 15
        }

        let mut i = 0;
        while i < 32 {
            output[2 * i] = bot_half(self[i]) as i8;
            output[2 * i + 1] = top_half(self[i]) as i8;
            i += 1;
        }
        // Step 2: recenter digits from [0, 16) into [-8, 8) by propagating a
        // carry left-to-right; digit 63 absorbs the last carry (safe because
        // the scalar's top bit is clear).
        let mut i = 0;
        while i < 63 {
            let carry = (output[i] + 8) >> 4;
            output[i] -= carry << 4;
            output[i + 1] += carry;
            i += 1;
        }
        output
    }
1032
1033 #[cfg(any(feature = "alloc", all(test, feature = "precomputed-tables")))]
1036 pub(crate) fn to_radix_2w_size_hint(w: usize) -> usize {
1037 debug_assert!(w >= 4);
1038 debug_assert!(w <= 8);
1039
1040 let digits_count = if w <= 7 {
1046 (256 + w - 1) / w
1047 } else if w == 8 {
1048 (256 + w - 1) / w + 1_usize
1050 } else {
1051 panic!("invalid radix parameter")
1052 };
1053
1054 debug_assert!(digits_count <= 64);
1055 digits_count
1056 }
1057
    /// Write this scalar as signed radix-2^w digits (4 ≤ w ≤ 8), each roughly
    /// in [-2^(w-1), 2^(w-1)]; see `to_radix_2w_size_hint` for the digit
    /// count. Used by the Pippenger / fixed-window multiscalar code.
    #[cfg(any(feature = "alloc", feature = "precomputed-tables"))]
    pub(crate) fn as_radix_2w(&self, w: usize) -> [i8; 64] {
        debug_assert!(w >= 4);
        debug_assert!(w <= 8);

        // w == 4 is exactly the radix-16 decomposition.
        if w == 4 {
            return self.as_radix_16();
        }

        // View the scalar as four little-endian u64 limbs.
        let mut scalar64x4 = [0u64; 4];
        read_le_u64_into(&self.bytes, &mut scalar64x4[0..4]);

        let radix: u64 = 1 << w;
        let window_mask: u64 = radix - 1;

        let mut carry = 0u64;
        let mut digits = [0i8; 64];
        let digits_count = (256 + w - 1) / w;
        #[allow(clippy::needless_range_loop)]
        for i in 0..digits_count {
            // Locate the limb and bit offset of digit i.
            let bit_offset = i * w;
            let u64_idx = bit_offset / 64;
            let bit_idx = bit_offset % 64;

            // Read w bits at bit_offset, stitching across the limb boundary
            // unless we're in the last limb (where there is no next limb).
            let bit_buf: u64 = if bit_idx < 64 - w || u64_idx == 3 {
                scalar64x4[u64_idx] >> bit_idx
            } else {
                (scalar64x4[u64_idx] >> bit_idx) | (scalar64x4[1 + u64_idx] << (64 - bit_idx))
            };

            // Recenter: coef is the raw window plus incoming carry; a carry is
            // generated whenever coef >= radix/2, making the stored digit
            // coef - carry*2^w ∈ [-2^(w-1), 2^(w-1)).
            let coef = carry + (bit_buf & window_mask); carry = (coef + (radix / 2)) >> w;
            digits[i] = ((coef as i64) - (carry << w) as i64) as i8;
        }

        // Fold the final carry back in: w == 8 uses the extra 65th digit slot,
        // otherwise it is added into the top digit.
        if w == 8 {
            digits[digits_count] += carry as i8;
        } else {
            digits[digits_count - 1] += (carry << w) as i8;
        }

        digits
    }
1141
    /// Unpack this scalar's bytes into the backend limb representation for
    /// arithmetic.
    pub(crate) fn unpack(&self) -> UnpackedScalar {
        UnpackedScalar::from_bytes(&self.bytes)
    }
1146
    /// Fully reduce this scalar mod ℓ by multiplying by the Montgomery
    /// constant R and Montgomery-reducing — the two steps compose to a plain
    /// reduction of the 256-bit value.
    #[allow(non_snake_case)]
    fn reduce(&self) -> Scalar {
        let x = self.unpack();
        let xR = UnpackedScalar::mul_internal(&x, &constants::R);
        let x_mod_l = UnpackedScalar::montgomery_reduce(&xR);
        x_mod_l.pack()
    }
1155
    /// Constant-time check that this scalar is fully reduced mod ℓ, i.e. its
    /// encoding is unchanged by `reduce()`.
    fn is_canonical(&self) -> Choice {
        self.ct_eq(&self.reduce())
    }
1161}
1162
1163impl UnpackedScalar {
    /// Pack the backend limbs back into a 32-byte `Scalar`.
    fn pack(&self) -> Scalar {
        Scalar {
            bytes: self.to_bytes(),
        }
    }
1170
    /// Inverse mod ℓ of a scalar in Montgomery form, via exponentiation by
    /// ℓ - 2 (Fermat's little theorem) using a fixed addition chain.
    ///
    /// The `_1`, `_10`, … names encode the binary value each precomputed
    /// power represents; the square-and-multiply schedule below is a fixed
    /// chain and must not be reordered.
    #[rustfmt::skip] #[allow(clippy::just_underscores_and_digits)]
    pub fn montgomery_invert(&self) -> UnpackedScalar {
        // Precomputed odd powers of self for the addition chain.
        let _1 = *self;
        let _10 = _1.montgomery_square();
        let _100 = _10.montgomery_square();
        let _11 = UnpackedScalar::montgomery_mul(&_10, &_1);
        let _101 = UnpackedScalar::montgomery_mul(&_10, &_11);
        let _111 = UnpackedScalar::montgomery_mul(&_10, &_101);
        let _1001 = UnpackedScalar::montgomery_mul(&_10, &_111);
        let _1011 = UnpackedScalar::montgomery_mul(&_10, &_1001);
        let _1111 = UnpackedScalar::montgomery_mul(&_100, &_1011);

        // Accumulator starts at self^15 * self = self^16... built up below.
        let mut y = UnpackedScalar::montgomery_mul(&_1111, &_1);

        // y <- y^(2^squarings) * x.
        #[inline]
        fn square_multiply(y: &mut UnpackedScalar, squarings: usize, x: &UnpackedScalar) {
            let mut i = 0;
            while i < squarings {
                *y = y.montgomery_square();
                i += 1;
            }
            *y = UnpackedScalar::montgomery_mul(y, x);
        }

        // Fixed schedule computing self^(ℓ-2); do not modify.
        square_multiply(&mut y, 123 + 3, &_101);
        square_multiply(&mut y, 2 + 2, &_11);
        square_multiply(&mut y, 1 + 4, &_1111);
        square_multiply(&mut y, 1 + 4, &_1111);
        square_multiply(&mut y, 4, &_1001);
        square_multiply(&mut y, 2, &_11);
        square_multiply(&mut y, 1 + 4, &_1111);
        square_multiply(&mut y, 1 + 3, &_101);
        square_multiply(&mut y, 3 + 3, &_101);
        square_multiply(&mut y, 3, &_111);
        square_multiply(&mut y, 1 + 4, &_1111);
        square_multiply(&mut y, 2 + 3, &_111);
        square_multiply(&mut y, 2 + 2, &_11);
        square_multiply(&mut y, 1 + 4, &_1011);
        square_multiply(&mut y, 2 + 4, &_1011);
        square_multiply(&mut y, 6 + 4, &_1001);
        square_multiply(&mut y, 2 + 2, &_11);
        square_multiply(&mut y, 3 + 2, &_11);
        square_multiply(&mut y, 3 + 2, &_11);
        square_multiply(&mut y, 1 + 4, &_1001);
        square_multiply(&mut y, 1 + 3, &_111);
        square_multiply(&mut y, 2 + 4, &_1111);
        square_multiply(&mut y, 1 + 4, &_1011);
        square_multiply(&mut y, 3, &_101);
        square_multiply(&mut y, 2 + 4, &_1111);
        square_multiply(&mut y, 3, &_101);
        square_multiply(&mut y, 1 + 2, &_11);

        y
    }
1230
    /// Inverse mod ℓ of a non-Montgomery-form scalar: convert to Montgomery
    /// form, invert there, convert back.
    pub fn invert(&self) -> UnpackedScalar {
        self.as_montgomery().montgomery_invert().from_montgomery()
    }
1235}
1236
/// `ff::Field` implementation so `Scalar` can be used with the `group`/`ff`
/// ecosystem traits.
#[cfg(feature = "group")]
impl Field for Scalar {
    const ZERO: Self = Self::ZERO;
    const ONE: Self = Self::ONE;

    fn random(mut rng: impl RngCore) -> Self {
        // 512 random bits reduced mod ℓ gives a statistically uniform scalar.
        let mut scalar_bytes = [0u8; 64];
        rng.fill_bytes(&mut scalar_bytes);
        Self::from_bytes_mod_order_wide(&scalar_bytes)
    }

    fn square(&self) -> Self {
        self * self
    }

    fn double(&self) -> Self {
        self + self
    }

    fn invert(&self) -> CtOption<Self> {
        // Gate on is_zero: zero has no inverse, so return None in that case.
        CtOption::new(self.invert(), !self.is_zero())
    }

    fn sqrt_ratio(num: &Self, div: &Self) -> (Choice, Self) {
        #[allow(unused_qualifications)]
        group::ff::helpers::sqrt_ratio_generic(num, div)
    }

    fn sqrt(&self) -> CtOption<Self> {
        // Tonelli-Shanks with a hard-coded exponent; the four u64s are the
        // little-endian limbs of the exponent used by the helper
        // (presumably (ℓ + 3)/8-style constant — see ff's sqrt helper docs).
        #[allow(unused_qualifications)]
        group::ff::helpers::sqrt_tonelli_shanks(
            self,
            [
                0xcb02_4c63_4b9e_ba7d,
                0x029b_df3b_d45e_f39a,
                0x0000_0000_0000_0000,
                0x0200_0000_0000_0000,
            ],
        )
    }
}
1279
/// `ff::PrimeField` implementation: canonical 32-byte repr plus the constants
/// the trait requires (all encoded as little-endian `Scalar` bytes).
#[cfg(feature = "group")]
impl PrimeField for Scalar {
    type Repr = [u8; 32];

    fn from_repr(repr: Self::Repr) -> CtOption<Self> {
        // Constant-time canonical decoding.
        Self::from_canonical_bytes(repr)
    }

    fn from_repr_vartime(repr: Self::Repr) -> Option<Self> {
        // Variable-time variant: early-exits are acceptable here by contract.
        // Check that the high bit indicates a value < 2^255.
        if (repr[31] >> 7) != 0u8 {
            return None;
        }

        let candidate = Scalar { bytes: repr };

        // Canonical iff the encoding is unchanged by reduction mod ℓ.
        if candidate == candidate.reduce() {
            Some(candidate)
        } else {
            None
        }
    }

    fn to_repr(&self) -> Self::Repr {
        self.to_bytes()
    }

    fn is_odd(&self) -> Choice {
        // Parity is the low bit of the little-endian encoding.
        Choice::from(self.as_bytes()[0] & 1)
    }

    /// The modulus ℓ as a hex string, required by the trait.
    const MODULUS: &'static str =
        "0x1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed";
    const NUM_BITS: u32 = 253;
    const CAPACITY: u32 = 252;

    // 2^-1 mod ℓ, little-endian bytes.
    const TWO_INV: Self = Self {
        bytes: [
            0xf7, 0xe9, 0x7a, 0x2e, 0x8d, 0x31, 0x09, 0x2c, 0x6b, 0xce, 0x7b, 0x51, 0xef, 0x7c,
            0x6f, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x08,
        ],
    };
    // A fixed multiplicative generator of the field, as required by ff.
    const MULTIPLICATIVE_GENERATOR: Self = Self {
        bytes: [
            2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ],
    };
    // ℓ - 1 = 2^S * t with t odd; S is the 2-adicity.
    const S: u32 = 2;
    // 2^S-th root of unity and its inverse, little-endian bytes.
    const ROOT_OF_UNITY: Self = Self {
        bytes: [
            0xd4, 0x07, 0xbe, 0xeb, 0xdf, 0x75, 0x87, 0xbe, 0xfe, 0x83, 0xce, 0x42, 0x53, 0x56,
            0xf0, 0x0e, 0x7a, 0xc2, 0xc1, 0xab, 0x60, 0x6d, 0x3d, 0x7d, 0xe7, 0x81, 0x79, 0xe0,
            0x10, 0x73, 0x4a, 0x09,
        ],
    };
    const ROOT_OF_UNITY_INV: Self = Self {
        bytes: [
            0x19, 0xcc, 0x37, 0x71, 0x3a, 0xed, 0x8a, 0x99, 0xd7, 0x18, 0x29, 0x60, 0x8b, 0xa3,
            0xee, 0x05, 0x86, 0x3d, 0x3e, 0x54, 0x9f, 0x92, 0xc2, 0x82, 0x18, 0x7e, 0x86, 0x1f,
            0xef, 0x8c, 0xb5, 0x06,
        ],
    };
    // DELTA = generator^(2^S), as required by ff.
    const DELTA: Self = Self {
        bytes: [
            16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ],
    };
}
1351
/// `ff::PrimeFieldBits` implementation: exposes the scalar and the modulus as
/// little-endian bit views.
#[cfg(feature = "group-bits")]
impl PrimeFieldBits for Scalar {
    type ReprBits = [u8; 32];

    fn to_le_bits(&self) -> FieldBits<Self::ReprBits> {
        self.to_repr().into()
    }

    fn char_le_bits() -> FieldBits<Self::ReprBits> {
        // The field characteristic is the basepoint order ℓ.
        constants::BASEPOINT_ORDER_PRIVATE.to_bytes().into()
    }
}
1364
/// Uniform scalar from 64 uniform bytes, via wide reduction mod ℓ.
#[cfg(feature = "group")]
impl FromUniformBytes<64> for Scalar {
    fn from_uniform_bytes(bytes: &[u8; 64]) -> Self {
        Scalar::from_bytes_mod_order_wide(bytes)
    }
}
1371
/// Read little-endian `u64` words out of `src` into `dst`.
///
/// # Panics
/// Panics unless `src.len() == 8 * dst.len()`.
fn read_le_u64_into(src: &[u8], dst: &mut [u64]) {
    assert!(src.len() == 8 * dst.len());
    for (i, word) in dst.iter_mut().enumerate() {
        // Copy the i-th 8-byte window and decode it little-endian.
        let mut chunk = [0u8; 8];
        chunk.copy_from_slice(&src[i * 8..i * 8 + 8]);
        *word = u64::from_le_bytes(chunk);
    }
}
1394
/// Clamp a 32-byte string per RFC 7748 / X25519 key generation: clear the low
/// three bits of byte 0 (multiple of the cofactor 8), clear the top bit and
/// set bit 254 of byte 31 (fixes the value in [2^254, 2^255)).
#[must_use]
pub const fn clamp_integer(mut bytes: [u8; 32]) -> [u8; 32] {
    bytes[0] &= 0b1111_1000;
    bytes[31] = (bytes[31] & 0b0111_1111) | 0b0100_0000;
    bytes
}
1421
1422#[cfg(test)]
1423pub(crate) mod test {
1424 use super::*;
1425
1426 #[cfg(feature = "alloc")]
1427 use alloc::vec::Vec;
1428
1429 pub static X: Scalar = Scalar {
1431 bytes: [
1432 0x4e, 0x5a, 0xb4, 0x34, 0x5d, 0x47, 0x08, 0x84, 0x59, 0x13, 0xb4, 0x64, 0x1b, 0xc2,
1433 0x7d, 0x52, 0x52, 0xa5, 0x85, 0x10, 0x1b, 0xcc, 0x42, 0x44, 0xd4, 0x49, 0xf4, 0xa8,
1434 0x79, 0xd9, 0xf2, 0x04,
1435 ],
1436 };
1437 pub static XINV: Scalar = Scalar {
1439 bytes: [
1440 0x1c, 0xdc, 0x17, 0xfc, 0xe0, 0xe9, 0xa5, 0xbb, 0xd9, 0x24, 0x7e, 0x56, 0xbb, 0x01,
1441 0x63, 0x47, 0xbb, 0xba, 0x31, 0xed, 0xd5, 0xa9, 0xbb, 0x96, 0xd5, 0x0b, 0xcd, 0x7a,
1442 0x3f, 0x96, 0x2a, 0x0f,
1443 ],
1444 };
1445 pub static Y: Scalar = Scalar {
1447 bytes: [
1448 0x90, 0x76, 0x33, 0xfe, 0x1c, 0x4b, 0x66, 0xa4, 0xa2, 0x8d, 0x2d, 0xd7, 0x67, 0x83,
1449 0x86, 0xc3, 0x53, 0xd0, 0xde, 0x54, 0x55, 0xd4, 0xfc, 0x9d, 0xe8, 0xef, 0x7a, 0xc3,
1450 0x1f, 0x35, 0xbb, 0x05,
1451 ],
1452 };
1453
1454 pub(crate) static LARGEST_UNREDUCED_SCALAR: Scalar = Scalar {
1460 bytes: [
1461 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1462 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1463 0xff, 0xff, 0xff, 0x7f,
1464 ],
1465 };
1466
1467 static X_TIMES_Y: Scalar = Scalar {
1469 bytes: [
1470 0x6c, 0x33, 0x74, 0xa1, 0x89, 0x4f, 0x62, 0x21, 0x0a, 0xaa, 0x2f, 0xe1, 0x86, 0xa6,
1471 0xf9, 0x2c, 0xe0, 0xaa, 0x75, 0xc2, 0x77, 0x95, 0x81, 0xc2, 0x95, 0xfc, 0x08, 0x17,
1472 0x9a, 0x73, 0x94, 0x0c,
1473 ],
1474 };
1475
1476 static CANONICAL_2_256_MINUS_1: Scalar = Scalar {
1480 bytes: [
1481 28, 149, 152, 141, 116, 49, 236, 214, 112, 207, 125, 115, 244, 91, 239, 198, 254, 255,
1482 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 15,
1483 ],
1484 };
1485
1486 static A_SCALAR: Scalar = Scalar {
1487 bytes: [
1488 0x1a, 0x0e, 0x97, 0x8a, 0x90, 0xf6, 0x62, 0x2d, 0x37, 0x47, 0x02, 0x3f, 0x8a, 0xd8,
1489 0x26, 0x4d, 0xa7, 0x58, 0xaa, 0x1b, 0x88, 0xe0, 0x40, 0xd1, 0x58, 0x9e, 0x7b, 0x7f,
1490 0x23, 0x76, 0xef, 0x09,
1491 ],
1492 };
1493
1494 static A_NAF: [i8; 256] = [
1495 0, 13, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, -9, 0, 0, 0, 0, -11, 0, 0, 0, 0, 3, 0, 0,
1496 0, 0, 1, 0, 0, 0, 0, 9, 0, 0, 0, 0, -5, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 11, 0, 0, 0, 0,
1497 11, 0, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, -3, 0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
1498 0, -1, 0, 0, 0, 0, 0, 9, 0, 0, 0, 0, -15, 0, 0, 0, 0, -7, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, 5,
1499 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, -3, 0, 0, 0, 0, -11, 0, 0, 0, 0, -7, 0, 0, 0, 0, -13, 0, 0,
1500 0, 0, 11, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, -15, 0, 0, 0, 0, 1, 0, 0, 0, 0,
1501 7, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 15,
1502 0, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, -15, 0,
1503 0, 0, 0, 0, 15, 0, 0, 0, 0, 15, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
1504 ];
1505
1506 const BASEPOINT_ORDER_MINUS_ONE: Scalar = Scalar {
1507 bytes: [
1508 0xec, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58, 0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9,
1509 0xde, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1510 0x00, 0x00, 0x00, 0x10,
1511 ],
1512 };
1513
1514 static LARGEST_CLAMPED_INTEGER: [u8; 32] = clamp_integer(LARGEST_UNREDUCED_SCALAR.bytes);
1516
1517 #[test]
1518 fn fuzzer_testcase_reduction() {
1519 let a_bytes = [
1521 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
1522 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1523 ];
1524 let b_bytes = [
1526 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 210, 210,
1527 210, 255, 255, 255, 255, 10,
1528 ];
1529 let c_bytes = [
1531 134, 171, 119, 216, 180, 128, 178, 62, 171, 132, 32, 62, 34, 119, 104, 193, 47, 215,
1532 181, 250, 14, 207, 172, 93, 75, 207, 211, 103, 144, 204, 56, 14,
1533 ];
1534
1535 let a = Scalar::from_bytes_mod_order(a_bytes);
1536 let b = Scalar::from_bytes_mod_order(b_bytes);
1537 let c = Scalar::from_bytes_mod_order(c_bytes);
1538
1539 let mut tmp = [0u8; 64];
1540
1541 tmp[0..32].copy_from_slice(&a_bytes[..]);
1543 let also_a = Scalar::from_bytes_mod_order_wide(&tmp);
1544
1545 tmp[0..32].copy_from_slice(&b_bytes[..]);
1547 let also_b = Scalar::from_bytes_mod_order_wide(&tmp);
1548
1549 let expected_c = a * b;
1550 let also_expected_c = also_a * also_b;
1551
1552 assert_eq!(c, expected_c);
1553 assert_eq!(c, also_expected_c);
1554 }
1555
1556 #[test]
1557 fn non_adjacent_form_test_vector() {
1558 let naf = A_SCALAR.non_adjacent_form(5);
1559 for i in 0..256 {
1560 assert_eq!(naf[i], A_NAF[i]);
1561 }
1562 }
1563
1564 fn non_adjacent_form_iter(w: usize, x: &Scalar) {
1565 let naf = x.non_adjacent_form(w);
1566
1567 let mut y = Scalar::ZERO;
1569 for i in (0..256).rev() {
1570 y += y;
1571 let digit = if naf[i] < 0 {
1572 -Scalar::from((-naf[i]) as u64)
1573 } else {
1574 Scalar::from(naf[i] as u64)
1575 };
1576 y += digit;
1577 }
1578
1579 assert_eq!(*x, y);
1580 }
1581
1582 #[test]
1583 fn non_adjacent_form_random() {
1584 let mut rng = rand::thread_rng();
1585 for _ in 0..1_000 {
1586 let x = Scalar::random(&mut rng);
1587 for w in &[5, 6, 7, 8] {
1588 non_adjacent_form_iter(*w, &x);
1589 }
1590 }
1591 }
1592
1593 #[test]
1594 fn from_u64() {
1595 let val: u64 = 0xdeadbeefdeadbeef;
1596 let s = Scalar::from(val);
1597 assert_eq!(s[7], 0xde);
1598 assert_eq!(s[6], 0xad);
1599 assert_eq!(s[5], 0xbe);
1600 assert_eq!(s[4], 0xef);
1601 assert_eq!(s[3], 0xde);
1602 assert_eq!(s[2], 0xad);
1603 assert_eq!(s[1], 0xbe);
1604 assert_eq!(s[0], 0xef);
1605 }
1606
1607 #[test]
1608 fn scalar_mul_by_one() {
1609 let test_scalar = X * Scalar::ONE;
1610 for i in 0..32 {
1611 assert!(test_scalar[i] == X[i]);
1612 }
1613 }
1614
1615 #[test]
1616 fn add_reduces() {
1617 assert_eq!(BASEPOINT_ORDER_MINUS_ONE + Scalar::ONE, Scalar::ZERO);
1619 }
1620
1621 #[test]
1622 fn sub_reduces() {
1623 assert_eq!(Scalar::ZERO - Scalar::ONE, BASEPOINT_ORDER_MINUS_ONE);
1625 }
1626
1627 #[test]
1628 fn impl_add() {
1629 let two = Scalar::from(2u64);
1630 let one = Scalar::ONE;
1631 let should_be_two = one + one;
1632 assert_eq!(should_be_two, two);
1633 }
1634
1635 #[allow(non_snake_case)]
1636 #[test]
1637 fn impl_mul() {
1638 let should_be_X_times_Y = X * Y;
1639 assert_eq!(should_be_X_times_Y, X_TIMES_Y);
1640 }
1641
1642 #[allow(non_snake_case)]
1643 #[test]
1644 #[cfg(feature = "alloc")]
1645 fn impl_product() {
1646 let X_Y_vector = [X, Y];
1648 let should_be_X_times_Y: Scalar = X_Y_vector.iter().product();
1649 assert_eq!(should_be_X_times_Y, X_TIMES_Y);
1650
1651 let one = Scalar::ONE;
1653 let empty_vector = [];
1654 let should_be_one: Scalar = empty_vector.iter().product();
1655 assert_eq!(should_be_one, one);
1656
1657 let xs = [Scalar::from(2u64); 10];
1659 let ys = [Scalar::from(3u64); 10];
1660 let zs = xs.iter().zip(ys.iter()).map(|(x, y)| x * y);
1662
1663 let x_prod: Scalar = xs.iter().product();
1664 let y_prod: Scalar = ys.iter().product();
1665 let z_prod: Scalar = zs.product();
1666
1667 assert_eq!(x_prod, Scalar::from(1024u64));
1668 assert_eq!(y_prod, Scalar::from(59049u64));
1669 assert_eq!(z_prod, Scalar::from(60466176u64));
1670 assert_eq!(x_prod * y_prod, z_prod);
1671 }
1672
1673 #[test]
1674 #[cfg(feature = "alloc")]
1675 fn impl_sum() {
1676 let two = Scalar::from(2u64);
1678 let one_vector = [Scalar::ONE, Scalar::ONE];
1679 let should_be_two: Scalar = one_vector.iter().sum();
1680 assert_eq!(should_be_two, two);
1681
1682 let zero = Scalar::ZERO;
1684 let empty_vector = [];
1685 let should_be_zero: Scalar = empty_vector.iter().sum();
1686 assert_eq!(should_be_zero, zero);
1687
1688 let xs = [Scalar::from(1u64); 10];
1690 let ys = [Scalar::from(2u64); 10];
1691 let zs = xs.iter().zip(ys.iter()).map(|(x, y)| x + y);
1693
1694 let x_sum: Scalar = xs.iter().sum();
1695 let y_sum: Scalar = ys.iter().sum();
1696 let z_sum: Scalar = zs.sum();
1697
1698 assert_eq!(x_sum, Scalar::from(10u64));
1699 assert_eq!(y_sum, Scalar::from(20u64));
1700 assert_eq!(z_sum, Scalar::from(30u64));
1701 assert_eq!(x_sum + y_sum, z_sum);
1702 }
1703
1704 #[test]
1705 fn square() {
1706 let expected = X * X;
1707 let actual = X.unpack().square().pack();
1708 for i in 0..32 {
1709 assert!(expected[i] == actual[i]);
1710 }
1711 }
1712
1713 #[test]
1714 fn reduce() {
1715 let biggest = Scalar::from_bytes_mod_order([0xff; 32]);
1716 assert_eq!(biggest, CANONICAL_2_256_MINUS_1);
1717 }
1718
1719 #[test]
1720 fn from_bytes_mod_order_wide() {
1721 let mut bignum = [0u8; 64];
1722 for i in 0..32 {
1724 bignum[i] = X[i];
1725 bignum[32 + i] = X[i];
1726 }
1727 let reduced = Scalar {
1730 bytes: [
1731 216, 154, 179, 139, 210, 121, 2, 71, 69, 99, 158, 216, 23, 173, 63, 100, 204, 0,
1732 91, 50, 219, 153, 57, 249, 28, 82, 31, 197, 100, 165, 192, 8,
1733 ],
1734 };
1735 let test_red = Scalar::from_bytes_mod_order_wide(&bignum);
1736 for i in 0..32 {
1737 assert!(test_red[i] == reduced[i]);
1738 }
1739 }
1740
1741 #[allow(non_snake_case)]
1742 #[test]
1743 fn invert() {
1744 let inv_X = X.invert();
1745 assert_eq!(inv_X, XINV);
1746 let should_be_one = inv_X * X;
1747 assert_eq!(should_be_one, Scalar::ONE);
1748 }
1749
1750 #[allow(non_snake_case)]
1752 #[test]
1753 fn neg_twice_is_identity() {
1754 let negative_X = -&X;
1755 let should_be_X = -&negative_X;
1756
1757 assert_eq!(should_be_X, X);
1758 }
1759
1760 #[test]
1761 fn to_bytes_from_bytes_roundtrips() {
1762 let unpacked = X.unpack();
1763 let bytes = unpacked.to_bytes();
1764 let should_be_unpacked = UnpackedScalar::from_bytes(&bytes);
1765
1766 assert_eq!(should_be_unpacked.0, unpacked.0);
1767 }
1768
1769 #[test]
1770 fn montgomery_reduce_matches_from_bytes_mod_order_wide() {
1771 let mut bignum = [0u8; 64];
1772
1773 for i in 0..32 {
1775 bignum[i] = X[i];
1776 bignum[32 + i] = X[i];
1777 }
1778 let expected = Scalar {
1781 bytes: [
1782 216, 154, 179, 139, 210, 121, 2, 71, 69, 99, 158, 216, 23, 173, 63, 100, 204, 0,
1783 91, 50, 219, 153, 57, 249, 28, 82, 31, 197, 100, 165, 192, 8,
1784 ],
1785 };
1786 let reduced = Scalar::from_bytes_mod_order_wide(&bignum);
1787
1788 assert_eq!(reduced.bytes, expected.bytes);
1790
1791 let interim =
1793 UnpackedScalar::mul_internal(&UnpackedScalar::from_bytes_wide(&bignum), &constants::R);
1794 let montgomery_reduced = UnpackedScalar::montgomery_reduce(&interim);
1796
1797 assert_eq!(montgomery_reduced.0, reduced.unpack().0);
1799 assert_eq!(montgomery_reduced.0, expected.unpack().0)
1800 }
1801
1802 #[test]
1803 fn canonical_decoding() {
1804 let canonical_bytes = [
1806 99, 99, 99, 99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1807 0, 0, 0, 0,
1808 ];
1809
1810 let non_canonical_bytes_because_unreduced = [16; 32];
1815
1816 let non_canonical_bytes_because_highbit = [
1818 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1819 0, 0, 128,
1820 ];
1821
1822 assert!(bool::from(
1823 Scalar::from_canonical_bytes(canonical_bytes).is_some()
1824 ));
1825 assert!(bool::from(
1826 Scalar::from_canonical_bytes(non_canonical_bytes_because_unreduced).is_none()
1827 ));
1828 assert!(bool::from(
1829 Scalar::from_canonical_bytes(non_canonical_bytes_because_highbit).is_none()
1830 ));
1831 }
1832
1833 #[test]
1834 #[cfg(feature = "serde")]
1835 fn serde_bincode_scalar_roundtrip() {
1836 use bincode;
1837 let encoded = bincode::serialize(&X).unwrap();
1838 let parsed: Scalar = bincode::deserialize(&encoded).unwrap();
1839 assert_eq!(parsed, X);
1840
1841 assert_eq!(encoded.len(), 32);
1843
1844 assert_eq!(X, bincode::deserialize(X.as_bytes()).unwrap(),);
1846 }
1847
1848 #[cfg(all(debug_assertions, feature = "alloc"))]
1849 #[test]
1850 #[should_panic]
1851 fn batch_invert_with_a_zero_input_panics() {
1852 let mut xs = vec![Scalar::ONE; 16];
1853 xs[3] = Scalar::ZERO;
1854 Scalar::batch_invert(&mut xs);
1856 }
1857
1858 #[test]
1859 #[cfg(feature = "alloc")]
1860 fn batch_invert_empty() {
1861 assert_eq!(Scalar::ONE, Scalar::batch_invert(&mut []));
1862 }
1863
1864 #[test]
1865 #[cfg(feature = "alloc")]
1866 fn batch_invert_consistency() {
1867 let mut x = Scalar::from(1u64);
1868 let mut v1: Vec<_> = (0..16)
1869 .map(|_| {
1870 let tmp = x;
1871 x = x + x;
1872 tmp
1873 })
1874 .collect();
1875 let v2 = v1.clone();
1876
1877 let expected: Scalar = v1.iter().product();
1878 let expected = expected.invert();
1879 let ret = Scalar::batch_invert(&mut v1);
1880 assert_eq!(ret, expected);
1881
1882 for (a, b) in v1.iter().zip(v2.iter()) {
1883 assert_eq!(a * b, Scalar::ONE);
1884 }
1885 }
1886
1887 #[cfg(feature = "precomputed-tables")]
1888 fn test_pippenger_radix_iter(scalar: Scalar, w: usize) {
1889 let digits_count = Scalar::to_radix_2w_size_hint(w);
1890 let digits = scalar.as_radix_2w(w);
1891
1892 let radix = Scalar::from((1 << w) as u64);
1893 let mut term = Scalar::ONE;
1894 let mut recovered_scalar = Scalar::ZERO;
1895 for digit in &digits[0..digits_count] {
1896 let digit = *digit;
1897 if digit != 0 {
1898 let sdigit = if digit < 0 {
1899 -Scalar::from((-(digit as i64)) as u64)
1900 } else {
1901 Scalar::from(digit as u64)
1902 };
1903 recovered_scalar += term * sdigit;
1904 }
1905 term *= radix;
1906 }
1907 assert_eq!(recovered_scalar, scalar.reduce());
1909 }
1910
1911 #[test]
1912 #[cfg(feature = "precomputed-tables")]
1913 fn test_pippenger_radix() {
1914 use core::iter;
1915 let cases = (2..100)
1918 .map(|s| Scalar::from(s as u64).invert())
1919 .chain(iter::once(LARGEST_UNREDUCED_SCALAR));
1922
1923 for scalar in cases {
1924 test_pippenger_radix_iter(scalar, 6);
1925 test_pippenger_radix_iter(scalar, 7);
1926 test_pippenger_radix_iter(scalar, 8);
1927 }
1928 }
1929
1930 #[test]
1931 #[cfg(feature = "alloc")]
1932 fn test_read_le_u64_into() {
1933 let cases: &[(&[u8], &[u64])] = &[
1934 (
1935 &[0xFE, 0xEF, 0x10, 0x01, 0x1F, 0xF1, 0x0F, 0xF0],
1936 &[0xF00F_F11F_0110_EFFE],
1937 ),
1938 (
1939 &[
1940 0xFE, 0xEF, 0x10, 0x01, 0x1F, 0xF1, 0x0F, 0xF0, 0x12, 0x34, 0x56, 0x78, 0x9A,
1941 0xBC, 0xDE, 0xF0,
1942 ],
1943 &[0xF00F_F11F_0110_EFFE, 0xF0DE_BC9A_7856_3412],
1944 ),
1945 ];
1946
1947 for (src, expected) in cases {
1948 let mut dst = vec![0; expected.len()];
1949 read_le_u64_into(src, &mut dst);
1950
1951 assert_eq!(&dst, expected, "Expected {:x?} got {:x?}", expected, dst);
1952 }
1953 }
1954
1955 #[test]
1957 fn test_scalar_from_int() {
1958 let s1 = Scalar::ONE;
1959
1960 let x = 0x23u8;
1964 let sx = Scalar::from(x);
1965 assert_eq!(sx + s1, Scalar::from(x + 1));
1966
1967 let x = 0x2323u16;
1968 let sx = Scalar::from(x);
1969 assert_eq!(sx + s1, Scalar::from(x + 1));
1970
1971 let x = 0x2323_2323u32;
1972 let sx = Scalar::from(x);
1973 assert_eq!(sx + s1, Scalar::from(x + 1));
1974
1975 let x = 0x2323_2323_2323_2323u64;
1976 let sx = Scalar::from(x);
1977 assert_eq!(sx + s1, Scalar::from(x + 1));
1978
1979 let x = 0x2323_2323_2323_2323_2323_2323_2323_2323u128;
1980 let sx = Scalar::from(x);
1981 assert_eq!(sx + s1, Scalar::from(x + 1));
1982 }
1983
    /// Sanity-check the `ff::PrimeField` associated constants against
    /// their defining relations.
    #[cfg(feature = "group")]
    #[test]
    fn ff_constants() {
        // TWO_INV must be the multiplicative inverse of 2.
        assert_eq!(Scalar::from(2u64) * Scalar::TWO_INV, Scalar::ONE);

        // ROOT_OF_UNITY_INV must invert ROOT_OF_UNITY.
        assert_eq!(
            Scalar::ROOT_OF_UNITY * Scalar::ROOT_OF_UNITY_INV,
            Scalar::ONE,
        );

        // ROOT_OF_UNITY must be a 2^S-th root of unity.
        assert_eq!(
            Scalar::ROOT_OF_UNITY.pow(&[1u64 << Scalar::S, 0, 0, 0]),
            Scalar::ONE,
        );

        // DELTA raised to this little-endian-limb exponent must be 1.
        // NOTE(review): the limbs appear to encode t = (l - 1) / 2^S for the
        // group order l — confirm against the curve-order constant.
        assert_eq!(
            Scalar::DELTA.pow(&[
                0x9604_98c6_973d_74fb,
                0x0537_be77_a8bd_e735,
                0x0000_0000_0000_0000,
                0x0400_0000_0000_0000,
            ]),
            Scalar::ONE,
        );
    }
2011
2012 #[cfg(feature = "group")]
2013 #[test]
2014 fn ff_impls() {
2015 assert!(bool::from(Scalar::ZERO.is_even()));
2016 assert!(bool::from(Scalar::ONE.is_odd()));
2017 assert!(bool::from(Scalar::from(2u64).is_even()));
2018 assert!(bool::from(Scalar::DELTA.is_even()));
2019
2020 assert!(bool::from(Field::invert(&Scalar::ZERO).is_none()));
2021 assert_eq!(Field::invert(&X).unwrap(), XINV);
2022
2023 let x_sq = X.square();
2024 assert!([X, -X].contains(&x_sq.sqrt().unwrap()));
2026
2027 assert_eq!(Scalar::from_repr_vartime(X.to_repr()), Some(X));
2028 assert_eq!(Scalar::from_repr_vartime([0xff; 32]), None);
2029
2030 assert_eq!(Scalar::from_repr(X.to_repr()).unwrap(), X);
2031 assert!(bool::from(Scalar::from_repr([0xff; 32]).is_none()));
2032 }
2033
2034 #[test]
2035 #[should_panic]
2036 fn test_read_le_u64_into_should_panic_on_bad_input() {
2037 let mut dst = [0_u64; 1];
2038 read_le_u64_into(&[0xFE, 0xEF, 0x10, 0x01, 0x1F, 0xF1, 0x0F], &mut dst);
2040 }
2041
2042 #[test]
2043 fn test_scalar_clamp() {
2044 let input = A_SCALAR.bytes;
2045 let expected = [
2046 0x18, 0x0e, 0x97, 0x8a, 0x90, 0xf6, 0x62, 0x2d, 0x37, 0x47, 0x02, 0x3f, 0x8a, 0xd8,
2047 0x26, 0x4d, 0xa7, 0x58, 0xaa, 0x1b, 0x88, 0xe0, 0x40, 0xd1, 0x58, 0x9e, 0x7b, 0x7f,
2048 0x23, 0x76, 0xef, 0x49,
2049 ];
2050 let actual = clamp_integer(input);
2051 assert_eq!(actual, expected);
2052
2053 let expected = [
2054 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2055 0, 0, 0x40,
2056 ];
2057 let actual = clamp_integer([0; 32]);
2058 assert_eq!(expected, actual);
2059 let expected = [
2060 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
2061 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
2062 0xff, 0xff, 0xff, 0x7f,
2063 ];
2064 let actual = clamp_integer([0xff; 32]);
2065 assert_eq!(actual, expected);
2066
2067 assert_eq!(
2068 LARGEST_CLAMPED_INTEGER,
2069 clamp_integer(LARGEST_CLAMPED_INTEGER)
2070 );
2071 }
2072
2073 #[test]
2077 fn test_mul_reduction_invariance() {
2078 let mut rng = rand::thread_rng();
2079
2080 for _ in 0..10 {
2081 let (a, b, c) = {
2084 let mut a_bytes = [0u8; 32];
2085 let mut b_bytes = [0u8; 32];
2086 let mut c_bytes = [0u8; 32];
2087 rng.fill_bytes(&mut a_bytes);
2088 rng.fill_bytes(&mut b_bytes);
2089 rng.fill_bytes(&mut c_bytes);
2090 (
2091 Scalar { bytes: a_bytes },
2092 Scalar { bytes: b_bytes },
2093 Scalar {
2094 bytes: clamp_integer(c_bytes),
2095 },
2096 )
2097 };
2098
2099 let reduced_mul_ab = a.reduce() * b.reduce();
2101 let reduced_mul_ac = a.reduce() * c.reduce();
2102 assert_eq!(a * b, reduced_mul_ab);
2103 assert_eq!(a.reduce() * b, reduced_mul_ab);
2104 assert_eq!(a * b.reduce(), reduced_mul_ab);
2105 assert_eq!(a * c, reduced_mul_ac);
2106 assert_eq!(a.reduce() * c, reduced_mul_ac);
2107 assert_eq!(a * c.reduce(), reduced_mul_ac);
2108 }
2109 }
2110}