1use crate::distr::{Distribution, StandardUniform};
12use crate::{Rng, RngExt};
13#[cfg(all(target_arch = "x86", feature = "simd_support"))]
14use core::arch::x86::__m512i;
15#[cfg(target_arch = "x86")]
16use core::arch::x86::{__m128i, __m256i};
17#[cfg(all(target_arch = "x86_64", feature = "simd_support"))]
18use core::arch::x86_64::__m512i;
19#[cfg(target_arch = "x86_64")]
20use core::arch::x86_64::{__m128i, __m256i};
21use core::num::{
22 NonZeroI8, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI128, NonZeroU8, NonZeroU16, NonZeroU32,
23 NonZeroU64, NonZeroU128,
24};
25#[cfg(feature = "simd_support")]
26use core::simd::*;
27
28impl Distribution<u8> for StandardUniform {
29 #[inline]
30 fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u8 {
31 rng.next_u32() as u8
32 }
33}
34
35impl Distribution<u16> for StandardUniform {
36 #[inline]
37 fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u16 {
38 rng.next_u32() as u16
39 }
40}
41
42impl Distribution<u32> for StandardUniform {
43 #[inline]
44 fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u32 {
45 rng.next_u32()
46 }
47}
48
49impl Distribution<u64> for StandardUniform {
50 #[inline]
51 fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u64 {
52 rng.next_u64()
53 }
54}
55
56impl Distribution<u128> for StandardUniform {
57 #[inline]
58 fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u128 {
59 let x = u128::from(rng.next_u64());
61 let y = u128::from(rng.next_u64());
62 (y << 64) | x
63 }
64}
65
/// Implements `Distribution<$signed>` for [`StandardUniform`] by sampling
/// the same-width unsigned integer and reinterpreting its bits.
macro_rules! impl_int_from_uint {
    ($signed:ty, $unsigned:ty) => {
        impl Distribution<$signed> for StandardUniform {
            #[inline]
            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $signed {
                // An `as` cast between equal-width integer types is a pure
                // bit reinterpretation, so uniformity carries over.
                rng.random::<$unsigned>() as $signed
            }
        }
    };
}
76
77impl Distribution<i8> for StandardUniform {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> i8 {
rng.random::<u8>() as i8
}
}impl_int_from_uint! { i8, u8 }
78impl Distribution<i16> for StandardUniform {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> i16 {
rng.random::<u16>() as i16
}
}impl_int_from_uint! { i16, u16 }
79impl Distribution<i32> for StandardUniform {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> i32 {
rng.random::<u32>() as i32
}
}impl_int_from_uint! { i32, u32 }
80impl Distribution<i64> for StandardUniform {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> i64 {
rng.random::<u64>() as i64
}
}impl_int_from_uint! { i64, u64 }
81impl Distribution<i128> for StandardUniform {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> i128 {
rng.random::<u128>() as i128
}
}impl_int_from_uint! { i128, u128 }
82
/// Implements `Distribution<$ty>` for a `NonZero*` type via rejection
/// sampling: draw the underlying integer and retry whenever zero comes up.
macro_rules! impl_nzint {
    ($ty:ty, $new:path) => {
        impl Distribution<$ty> for StandardUniform {
            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
                // Zero is drawn with probability 2^-bits, so the expected
                // iteration count is only marginally above one.
                loop {
                    match $new(rng.random()) {
                        Some(nonzero) => break nonzero,
                        None => continue,
                    }
                }
            }
        }
    };
}
96
97impl Distribution<NonZeroU8> for StandardUniform {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> NonZeroU8 {
loop { if let Some(nz) = NonZeroU8::new(rng.random()) { break nz; } }
}
}impl_nzint!(NonZeroU8, NonZeroU8::new);
98impl Distribution<NonZeroU16> for StandardUniform {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> NonZeroU16 {
loop { if let Some(nz) = NonZeroU16::new(rng.random()) { break nz; } }
}
}impl_nzint!(NonZeroU16, NonZeroU16::new);
99impl Distribution<NonZeroU32> for StandardUniform {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> NonZeroU32 {
loop { if let Some(nz) = NonZeroU32::new(rng.random()) { break nz; } }
}
}impl_nzint!(NonZeroU32, NonZeroU32::new);
100impl Distribution<NonZeroU64> for StandardUniform {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> NonZeroU64 {
loop { if let Some(nz) = NonZeroU64::new(rng.random()) { break nz; } }
}
}impl_nzint!(NonZeroU64, NonZeroU64::new);
101impl Distribution<NonZeroU128> for StandardUniform {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> NonZeroU128 {
loop {
if let Some(nz) = NonZeroU128::new(rng.random()) { break nz; }
}
}
}impl_nzint!(NonZeroU128, NonZeroU128::new);
102
103impl Distribution<NonZeroI8> for StandardUniform {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> NonZeroI8 {
loop { if let Some(nz) = NonZeroI8::new(rng.random()) { break nz; } }
}
}impl_nzint!(NonZeroI8, NonZeroI8::new);
104impl Distribution<NonZeroI16> for StandardUniform {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> NonZeroI16 {
loop { if let Some(nz) = NonZeroI16::new(rng.random()) { break nz; } }
}
}impl_nzint!(NonZeroI16, NonZeroI16::new);
105impl Distribution<NonZeroI32> for StandardUniform {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> NonZeroI32 {
loop { if let Some(nz) = NonZeroI32::new(rng.random()) { break nz; } }
}
}impl_nzint!(NonZeroI32, NonZeroI32::new);
106impl Distribution<NonZeroI64> for StandardUniform {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> NonZeroI64 {
loop { if let Some(nz) = NonZeroI64::new(rng.random()) { break nz; } }
}
}impl_nzint!(NonZeroI64, NonZeroI64::new);
107impl Distribution<NonZeroI128> for StandardUniform {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> NonZeroI128 {
loop {
if let Some(nz) = NonZeroI128::new(rng.random()) { break nz; }
}
}
}impl_nzint!(NonZeroI128, NonZeroI128::new);
108
109#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
110impl Distribution<__m128i> for StandardUniform {
111 #[inline]
112 fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> __m128i {
113 let mut buf = [0_u8; core::mem::size_of::<__m128i>()];
118 rng.fill_bytes(&mut buf);
119 unsafe { core::mem::transmute(buf) }
123 }
124}
125
126#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
127impl Distribution<__m256i> for StandardUniform {
128 #[inline]
129 fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> __m256i {
130 let mut buf = [0_u8; core::mem::size_of::<__m256i>()];
131 rng.fill_bytes(&mut buf);
132 unsafe { core::mem::transmute(buf) }
136 }
137}
138
#[cfg(all(
    any(target_arch = "x86", target_arch = "x86_64"),
    feature = "simd_support"
))]
impl Distribution<__m512i> for StandardUniform {
    /// Samples a `__m512i` filled with uniformly random bytes.
    #[inline]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> __m512i {
        // Fill a byte array of exactly matching size, then reinterpret it.
        let mut bytes = [0_u8; core::mem::size_of::<__m512i>()];
        rng.fill_bytes(&mut bytes);
        // SAFETY: `__m512i` is an opaque bag of bits with no invalid bit
        // patterns, and `bytes` has the same size and is fully initialized.
        unsafe { core::mem::transmute(bytes) }
    }
}
154
/// Implements `Distribution<Simd<T, N>>` for each listed element type by
/// filling the vector's backing array with random bytes.
#[cfg(feature = "simd_support")]
macro_rules! simd_impl {
    ($($elem:ty),+) => {$(
        #[cfg(feature = "simd_support")]
        impl<const LANES: usize> Distribution<Simd<$elem, LANES>> for StandardUniform {
            #[inline]
            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Simd<$elem, LANES> {
                // Start from an all-zero vector, then overwrite every lane
                // through the mutable array view.
                let mut out = Simd::default();
                rng.fill(out.as_mut_array().as_mut_slice());
                out
            }
        }
    )+};
}

#[cfg(feature = "simd_support")]
simd_impl!(u8, i8, u16, i16, u32, i32, u64, i64);
175
#[cfg(test)]
mod tests {
    use super::*;

    /// Smoke test: every integer type can be sampled without panicking.
    #[test]
    fn test_integers() {
        let mut rng = crate::test::rng(806);

        rng.sample::<i8, _>(StandardUniform);
        rng.sample::<i16, _>(StandardUniform);
        rng.sample::<i32, _>(StandardUniform);
        rng.sample::<i64, _>(StandardUniform);
        rng.sample::<i128, _>(StandardUniform);

        rng.sample::<u8, _>(StandardUniform);
        rng.sample::<u16, _>(StandardUniform);
        rng.sample::<u32, _>(StandardUniform);
        rng.sample::<u64, _>(StandardUniform);
        rng.sample::<u128, _>(StandardUniform);
    }

    /// Smoke test for the x86 SIMD integer vector types.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    #[test]
    fn x86_integers() {
        let mut rng = crate::test::rng(807);

        rng.sample::<__m128i, _>(StandardUniform);
        rng.sample::<__m256i, _>(StandardUniform);
        #[cfg(feature = "simd_support")]
        rng.sample::<__m512i, _>(StandardUniform);
    }

    /// Pins the exact values produced from a fixed seed, guarding the
    /// crate's value-stability guarantee. Do not change the constants.
    #[test]
    fn value_stability() {
        // Draws three samples of `T` from a freshly seeded RNG and checks
        // them against the recorded reference values.
        fn test_samples<T: Copy + core::fmt::Debug + PartialEq>(zero: T, expected: &[T])
        where
            StandardUniform: Distribution<T>,
        {
            let mut rng = crate::test::rng(807);
            let mut observed = [zero; 3];
            for slot in observed.iter_mut() {
                *slot = rng.sample(StandardUniform);
            }
            assert_eq!(&observed, expected);
        }

        test_samples(0u8, &[9, 247, 111]);
        test_samples(0u16, &[32265, 42999, 38255]);
        test_samples(0u32, &[2220326409, 2575017975, 2018088303]);
        test_samples(
            0u64,
            &[
                11059617991457472009,
                16096616328739788143,
                1487364411147516184,
            ],
        );
        test_samples(
            0u128,
            &[
                296930161868957086625409848350820761097,
                145644820879247630242265036535529306392,
                111087889832015897993126088499035356354,
            ],
        );

        test_samples(0i8, &[9, -9, 111]);
        #[cfg(feature = "simd_support")]
        {
            test_samples(
                u8x4::default(),
                &[
                    u8x4::from([9, 126, 87, 132]),
                    u8x4::from([247, 167, 123, 153]),
                    u8x4::from([111, 149, 73, 120]),
                ],
            );
            test_samples(
                u8x8::default(),
                &[
                    u8x8::from([9, 126, 87, 132, 247, 167, 123, 153]),
                    u8x8::from([111, 149, 73, 120, 68, 171, 98, 223]),
                    u8x8::from([24, 121, 1, 50, 13, 46, 164, 20]),
                ],
            );

            test_samples(
                i64x8::default(),
                &[
                    i64x8::from([
                        -7387126082252079607,
                        -2350127744969763473,
                        1487364411147516184,
                        7895421560427121838,
                        602190064936008898,
                        6022086574635100741,
                        -5080089175222015595,
                        -4066367846667249123,
                    ]),
                    i64x8::from([
                        9180885022207963908,
                        3095981199532211089,
                        6586075293021332726,
                        419343203796414657,
                        3186951873057035255,
                        5287129228749947252,
                        444726432079249540,
                        -1587028029513790706,
                    ]),
                    i64x8::from([
                        6075236523189346388,
                        1351763722368165432,
                        -6192309979959753740,
                        -7697775502176768592,
                        -4482022114172078123,
                        7522501477800909500,
                        -1837258847956201231,
                        -586926753024886735,
                    ]),
                ],
            );
        }
    }
}