// rand/distr/uniform_float.rs
// Copyright 2018-2020 Developers of the Rand project.
// Copyright 2017 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! `UniformFloat` implementation
use super::{Error, SampleBorrow, SampleUniform, UniformSampler};
use crate::distr::float::IntoFloat;
use crate::distr::utils::{BoolAsSIMD, FloatAsSIMD, FloatSIMDUtils, IntAsSIMD};
use crate::{Rng, RngExt};

#[cfg(feature = "simd_support")]
use core::simd::prelude::*;

#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};

/// The back-end implementing [`UniformSampler`] for floating-point types.
///
/// Unless you are implementing [`UniformSampler`] for your own type, this type
/// should not be used directly, use [`Uniform`] instead.
///
/// # Implementation notes
///
/// `UniformFloat` implementations convert RNG output to a float in the range
/// `[1, 2)` via transmutation, map this to `[0, 1)`, then scale and translate
/// to the desired range. Values produced this way have what equals 23 bits of
/// random digits for an `f32` and 52 for an `f64`.
///
/// # Bias and range errors
///
/// Bias may be expected within the least-significant bit of the significand.
/// It is not guaranteed that exclusive limits of a range are respected; i.e.
/// when sampling the range `[a, b)` it is not guaranteed that `b` is never
/// sampled.
///
/// [`new`]: UniformSampler::new
/// [`new_inclusive`]: UniformSampler::new_inclusive
/// [`StandardUniform`]: crate::distr::StandardUniform
/// [`Uniform`]: super::Uniform
#[derive(Clone, Copy, Debug, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct UniformFloat<X> {
    // Lower bound of the range; samples are `low + fraction * scale`.
    low: X,
    // Pre-computed multiplier; non-negative after `new_bounded` clamping.
    scale: X,
}

/// Implements `SampleUniform`/`UniformSampler` for one float (or float SIMD)
/// type. `$meta` optionally gates the impls behind a `cfg` predicate,
/// `$uty` is the matching unsigned integer type used as the raw RNG output,
/// and `$bits_to_discard` is the number of non-significand bits to shift away.
macro_rules! uniform_float_impl {
    ($($meta:meta)?, $ty:ty, $uty:ident, $f_scalar:ident, $u_scalar:ident, $bits_to_discard:expr) => {
        $(#[cfg($meta)])?
        impl UniformFloat<$ty> {
            /// Construct, reducing `scale` as required to ensure that rounding
            /// can never yield values greater than `high`.
            ///
            /// Note: though it may be tempting to use a variant of this method
            /// to ensure that samples from `[low, high)` are always strictly
            /// less than `high`, this approach may be very slow where
            /// `scale.abs()` is much smaller than `high.abs()`
            /// (example: `low=0.99999999997819644, high=1.`).
            fn new_bounded(low: $ty, high: $ty, mut scale: $ty) -> Self {
                let max_rand = <$ty>::splat(1.0 as $f_scalar - $f_scalar::EPSILON);

                // Decrement `scale` (per lane, for SIMD) until the largest
                // possible sample can no longer exceed `high` after rounding.
                loop {
                    let mask = (scale * max_rand + low).gt_mask(high);
                    if !mask.any() {
                        break;
                    }
                    scale = scale.decrease_masked(mask);
                }

                debug_assert!(<$ty>::splat(0.0).all_le(scale));

                UniformFloat { low, scale }
            }
        }

        $(#[cfg($meta)])?
        impl SampleUniform for $ty {
            type Sampler = UniformFloat<$ty>;
        }

        $(#[cfg($meta)])?
        impl UniformSampler for UniformFloat<$ty> {
            type X = $ty;

            fn new<B1, B2>(low_b: B1, high_b: B2) -> Result<Self, Error>
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                #[cfg(debug_assertions)]
                if !(low.all_finite()) || !(high.all_finite()) {
                    return Err(Error::NonFinite);
                }
                if !(low.all_lt(high)) {
                    return Err(Error::EmptyRange);
                }

                let scale = high - low;
                if !(scale.all_finite()) {
                    return Err(Error::NonFinite);
                }

                Ok(Self::new_bounded(low, high, scale))
            }

            fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Result<Self, Error>
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                #[cfg(debug_assertions)]
                if !(low.all_finite()) || !(high.all_finite()) {
                    return Err(Error::NonFinite);
                }
                if !low.all_le(high) {
                    return Err(Error::EmptyRange);
                }

                // Scale up so that the maximum fraction maps to `high` itself.
                let max_rand = <$ty>::splat(1.0 as $f_scalar - $f_scalar::EPSILON);
                let scale = (high - low) / max_rand;
                if !scale.all_finite() {
                    return Err(Error::NonFinite);
                }

                Ok(Self::new_bounded(low, high, scale))
            }

            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
                // Generate a value in the range [1, 2)
                let value1_2 = (rng.random::<$uty>() >> $uty::splat($bits_to_discard)).into_float_with_exponent(0);

                // Get a value in the range [0, 1) to avoid overflow when multiplying by scale
                let value0_1 = value1_2 - <$ty>::splat(1.0);

                // We don't use `f64::mul_add`, because it is not available with
                // `no_std`. Furthermore, it is slower for some targets (but
                // faster for others). However, the order of multiplication and
                // addition is important, because on some platforms (e.g. ARM)
                // it will be optimized to a single (non-FMA) instruction.
                value0_1 * self.scale + self.low
            }

            #[inline]
            fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Result<Self::X, Error>
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                Self::sample_single_inclusive(low_b, high_b, rng)
            }

            #[inline]
            fn sample_single_inclusive<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Result<Self::X, Error>
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                #[cfg(debug_assertions)]
                if !low.all_finite() || !high.all_finite() {
                    return Err(Error::NonFinite);
                }
                if !low.all_le(high) {
                    return Err(Error::EmptyRange);
                }
                let scale = high - low;
                if !scale.all_finite() {
                    return Err(Error::NonFinite);
                }

                // Generate a value in the range [1, 2)
                let value1_2 =
                    (rng.random::<$uty>() >> $uty::splat($bits_to_discard)).into_float_with_exponent(0);

                // Get a value in the range [0, 1) to avoid overflow when multiplying by scale
                let value0_1 = value1_2 - <$ty>::splat(1.0);

                // Doing multiply before addition allows some architectures
                // to use a single instruction.
                Ok(value0_1 * scale + low)
            }
        }
    };
}

197impl UniformFloat<f32> {
    /// Construct, reducing `scale` as required to ensure that rounding
    /// can never yield values greater than `high`.
    ///
    /// Note: though it may be tempting to use a variant of this method
    /// to ensure that samples from `[low, high)` are always strictly
    /// less than `high`, this approach may be very slow where
    /// `scale.abs()` is much smaller than `high.abs()`
    /// (example: `low=0.99999999997819644, high=1.`).
    fn new_bounded(low: f32, high: f32, mut scale: f32) -> Self {
        let max_rand = <f32>::splat(1.0 as f32 - f32::EPSILON);
        loop {
            let mask = (scale * max_rand + low).gt_mask(high);
            if !mask.any() { break; }
            scale = scale.decrease_masked(mask);
        }
        if true {
            if !<f32>::splat(0.0).all_le(scale) {
                ::core::panicking::panic("assertion failed: <f32>::splat(0.0).all_le(scale)")
            };
        };
        UniformFloat { low, scale }
    }
}
impl SampleUniform for f32 {
    type Sampler = UniformFloat<f32>;
}
impl UniformSampler for UniformFloat<f32> {
    type X = f32;
    fn new<B1, B2>(low_b: B1, high_b: B2) -> Result<Self, Error> where
        B1: SampleBorrow<Self::X> + Sized, B2: SampleBorrow<Self::X> + Sized {
        let low = *low_b.borrow();
        let high = *high_b.borrow();
        if !(low.all_finite()) || !(high.all_finite()) {
            return Err(Error::NonFinite);
        }
        if !(low.all_lt(high)) { return Err(Error::EmptyRange); }
        let scale = high - low;
        if !(scale.all_finite()) { return Err(Error::NonFinite); }
        Ok(Self::new_bounded(low, high, scale))
    }
    fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Result<Self, Error>
        where B1: SampleBorrow<Self::X> + Sized, B2: SampleBorrow<Self::X> +
        Sized {
        let low = *low_b.borrow();
        let high = *high_b.borrow();
        if !(low.all_finite()) || !(high.all_finite()) {
            return Err(Error::NonFinite);
        }
        if !low.all_le(high) { return Err(Error::EmptyRange); }
        let max_rand = <f32>::splat(1.0 as f32 - f32::EPSILON);
        let scale = (high - low) / max_rand;
        if !scale.all_finite() { return Err(Error::NonFinite); }
        Ok(Self::new_bounded(low, high, scale))
    }
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
        let value1_2 =
            (rng.random::<u32>() >>
                        u32::splat(32 - 23)).into_float_with_exponent(0);
        let value0_1 = value1_2 - <f32>::splat(1.0);
        value0_1 * self.scale + self.low
    }
    #[inline]
    fn sample_single<R: Rng + ?Sized, B1,
        B2>(low_b: B1, high_b: B2, rng: &mut R) -> Result<Self::X, Error>
        where B1: SampleBorrow<Self::X> + Sized, B2: SampleBorrow<Self::X> +
        Sized {
        Self::sample_single_inclusive(low_b, high_b, rng)
    }
    #[inline]
    fn sample_single_inclusive<R: Rng + ?Sized, B1,
        B2>(low_b: B1, high_b: B2, rng: &mut R) -> Result<Self::X, Error>
        where B1: SampleBorrow<Self::X> + Sized, B2: SampleBorrow<Self::X> +
        Sized {
        let low = *low_b.borrow();
        let high = *high_b.borrow();
        if !low.all_finite() || !high.all_finite() {
            return Err(Error::NonFinite);
        }
        if !low.all_le(high) { return Err(Error::EmptyRange); }
        let scale = high - low;
        if !scale.all_finite() { return Err(Error::NonFinite); }
        let value1_2 =
            (rng.random::<u32>() >>
                        u32::splat(32 - 23)).into_float_with_exponent(0);
        let value0_1 = value1_2 - <f32>::splat(1.0);
        Ok(value0_1 * scale + low)
    }
}uniform_float_impl! { , f32, u32, f32, u32, 32 - 23 }
198impl UniformFloat<f64> {
    /// Construct, reducing `scale` as required to ensure that rounding
    /// can never yield values greater than `high`.
    ///
    /// Note: though it may be tempting to use a variant of this method
    /// to ensure that samples from `[low, high)` are always strictly
    /// less than `high`, this approach may be very slow where
    /// `scale.abs()` is much smaller than `high.abs()`
    /// (example: `low=0.99999999997819644, high=1.`).
    fn new_bounded(low: f64, high: f64, mut scale: f64) -> Self {
        let max_rand = <f64>::splat(1.0 as f64 - f64::EPSILON);
        loop {
            let mask = (scale * max_rand + low).gt_mask(high);
            if !mask.any() { break; }
            scale = scale.decrease_masked(mask);
        }
        if true {
            if !<f64>::splat(0.0).all_le(scale) {
                ::core::panicking::panic("assertion failed: <f64>::splat(0.0).all_le(scale)")
            };
        };
        UniformFloat { low, scale }
    }
}
impl SampleUniform for f64 {
    type Sampler = UniformFloat<f64>;
}
impl UniformSampler for UniformFloat<f64> {
    type X = f64;
    fn new<B1, B2>(low_b: B1, high_b: B2) -> Result<Self, Error> where
        B1: SampleBorrow<Self::X> + Sized, B2: SampleBorrow<Self::X> + Sized {
        let low = *low_b.borrow();
        let high = *high_b.borrow();
        if !(low.all_finite()) || !(high.all_finite()) {
            return Err(Error::NonFinite);
        }
        if !(low.all_lt(high)) { return Err(Error::EmptyRange); }
        let scale = high - low;
        if !(scale.all_finite()) { return Err(Error::NonFinite); }
        Ok(Self::new_bounded(low, high, scale))
    }
    fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Result<Self, Error>
        where B1: SampleBorrow<Self::X> + Sized, B2: SampleBorrow<Self::X> +
        Sized {
        let low = *low_b.borrow();
        let high = *high_b.borrow();
        if !(low.all_finite()) || !(high.all_finite()) {
            return Err(Error::NonFinite);
        }
        if !low.all_le(high) { return Err(Error::EmptyRange); }
        let max_rand = <f64>::splat(1.0 as f64 - f64::EPSILON);
        let scale = (high - low) / max_rand;
        if !scale.all_finite() { return Err(Error::NonFinite); }
        Ok(Self::new_bounded(low, high, scale))
    }
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
        let value1_2 =
            (rng.random::<u64>() >>
                        u64::splat(64 - 52)).into_float_with_exponent(0);
        let value0_1 = value1_2 - <f64>::splat(1.0);
        value0_1 * self.scale + self.low
    }
    #[inline]
    fn sample_single<R: Rng + ?Sized, B1,
        B2>(low_b: B1, high_b: B2, rng: &mut R) -> Result<Self::X, Error>
        where B1: SampleBorrow<Self::X> + Sized, B2: SampleBorrow<Self::X> +
        Sized {
        Self::sample_single_inclusive(low_b, high_b, rng)
    }
    #[inline]
    fn sample_single_inclusive<R: Rng + ?Sized, B1,
        B2>(low_b: B1, high_b: B2, rng: &mut R) -> Result<Self::X, Error>
        where B1: SampleBorrow<Self::X> + Sized, B2: SampleBorrow<Self::X> +
        Sized {
        let low = *low_b.borrow();
        let high = *high_b.borrow();
        if !low.all_finite() || !high.all_finite() {
            return Err(Error::NonFinite);
        }
        if !low.all_le(high) { return Err(Error::EmptyRange); }
        let scale = high - low;
        if !scale.all_finite() { return Err(Error::NonFinite); }
        let value1_2 =
            (rng.random::<u64>() >>
                        u64::splat(64 - 52)).into_float_with_exponent(0);
        let value0_1 = value1_2 - <f64>::splat(1.0);
        Ok(value0_1 * scale + low)
    }
}uniform_float_impl! { , f64, u64, f64, u64, 64 - 52 }

// SIMD lane-wise instantiations, gated on the `simd_support` feature.
#[cfg(feature = "simd_support")]
uniform_float_impl! { feature = "simd_support", f32x2, u32x2, f32, u32, 32 - 23 }
#[cfg(feature = "simd_support")]
uniform_float_impl! { feature = "simd_support", f32x4, u32x4, f32, u32, 32 - 23 }
#[cfg(feature = "simd_support")]
uniform_float_impl! { feature = "simd_support", f32x8, u32x8, f32, u32, 32 - 23 }
#[cfg(feature = "simd_support")]
uniform_float_impl! { feature = "simd_support", f32x16, u32x16, f32, u32, 32 - 23 }

#[cfg(feature = "simd_support")]
uniform_float_impl! { feature = "simd_support", f64x2, u64x2, f64, u64, 64 - 52 }
#[cfg(feature = "simd_support")]
uniform_float_impl! { feature = "simd_support", f64x4, u64x4, f64, u64, 64 - 52 }
#[cfg(feature = "simd_support")]
uniform_float_impl! { feature = "simd_support", f64x8, u64x8, f64, u64, 64 - 52 }

#[cfg(test)]
mod tests {
    use super::*;
    use crate::distr::{Uniform, utils::FloatSIMDScalarUtils};
    use crate::test::{const_rng, step_rng};

    #[test]
    #[cfg_attr(miri, ignore)] // Miri is too slow
    fn test_floats() {
        let mut rng = crate::test::rng(252);
        let mut zero_rng = const_rng(0);
        let mut max_rng = const_rng(0xffff_ffff_ffff_ffff);
        macro_rules! t {
            ($ty:ty, $f_scalar:ident, $bits_shifted:expr) => {{
                let v: &[($f_scalar, $f_scalar)] = &[
                    (0.0, 100.0),
                    (-1e35, -1e25),
                    (1e-35, 1e-25),
                    (-1e35, 1e35),
                    (<$f_scalar>::from_bits(0), <$f_scalar>::from_bits(3)),
                    (-<$f_scalar>::from_bits(10), -<$f_scalar>::from_bits(1)),
                    (-<$f_scalar>::from_bits(5), 0.0),
                    (-<$f_scalar>::from_bits(7), -0.0),
                    (0.1 * $f_scalar::MAX, $f_scalar::MAX),
                    (-$f_scalar::MAX * 0.2, $f_scalar::MAX * 0.7),
                ];
                for &(low_scalar, high_scalar) in v.iter() {
                    for lane in 0..<$ty>::LEN {
                        let low = <$ty>::splat(0.0 as $f_scalar).replace(lane, low_scalar);
                        let high = <$ty>::splat(1.0 as $f_scalar).replace(lane, high_scalar);
                        let my_uniform = Uniform::new(low, high).unwrap();
                        let my_incl_uniform = Uniform::new_inclusive(low, high).unwrap();
                        for _ in 0..100 {
                            let v = rng.sample(my_uniform).extract_lane(lane);
                            assert!(low_scalar <= v && v <= high_scalar);
                            let v = rng.sample(my_incl_uniform).extract_lane(lane);
                            assert!(low_scalar <= v && v <= high_scalar);
                            let v =
                                <$ty as SampleUniform>::Sampler::sample_single(low, high, &mut rng)
                                    .unwrap()
                                    .extract_lane(lane);
                            assert!(low_scalar <= v && v <= high_scalar);
                            let v = <$ty as SampleUniform>::Sampler::sample_single_inclusive(
                                low, high, &mut rng,
                            )
                            .unwrap()
                            .extract_lane(lane);
                            assert!(low_scalar <= v && v <= high_scalar);
                        }

                        assert_eq!(
                            rng.sample(Uniform::new_inclusive(low, low).unwrap())
                                .extract_lane(lane),
                            low_scalar
                        );

                        assert_eq!(zero_rng.sample(my_uniform).extract_lane(lane), low_scalar);
                        assert_eq!(
                            zero_rng.sample(my_incl_uniform).extract_lane(lane),
                            low_scalar
                        );
                        assert_eq!(
                            <$ty as SampleUniform>::Sampler::sample_single(
                                low,
                                high,
                                &mut zero_rng
                            )
                            .unwrap()
                            .extract_lane(lane),
                            low_scalar
                        );
                        assert_eq!(
                            <$ty as SampleUniform>::Sampler::sample_single_inclusive(
                                low,
                                high,
                                &mut zero_rng
                            )
                            .unwrap()
                            .extract_lane(lane),
                            low_scalar
                        );

                        assert!(max_rng.sample(my_uniform).extract_lane(lane) <= high_scalar);
                        assert!(max_rng.sample(my_incl_uniform).extract_lane(lane) <= high_scalar);
                        // sample_single cannot cope with max_rng:
                        // assert!(<$ty as SampleUniform>::Sampler
                        //     ::sample_single(low, high, &mut max_rng).unwrap()
                        //     .extract(lane) <= high_scalar);
                        assert!(
                            <$ty as SampleUniform>::Sampler::sample_single_inclusive(
                                low,
                                high,
                                &mut max_rng
                            )
                            .unwrap()
                            .extract_lane(lane)
                                <= high_scalar
                        );

                        // Don't run this test for really tiny differences between high and low
                        // since for those rounding might result in selecting high for a very
                        // long time.
                        if (high_scalar - low_scalar) > 0.0001 {
                            let mut lowering_max_rng =
                                step_rng(0xffff_ffff_ffff_ffff, (-1i64 << $bits_shifted) as u64);
                            assert!(
                                <$ty as SampleUniform>::Sampler::sample_single(
                                    low,
                                    high,
                                    &mut lowering_max_rng
                                )
                                .unwrap()
                                .extract_lane(lane)
                                    <= high_scalar
                            );
                        }
                    }
                }

                assert_eq!(
                    rng.sample(Uniform::new_inclusive($f_scalar::MAX, $f_scalar::MAX).unwrap()),
                    $f_scalar::MAX
                );
                assert_eq!(
                    rng.sample(Uniform::new_inclusive(-$f_scalar::MAX, -$f_scalar::MAX).unwrap()),
                    -$f_scalar::MAX
                );
            }};
        }

        t!(f32, f32, 32 - 23);
        t!(f64, f64, 64 - 52);
        #[cfg(feature = "simd_support")]
        {
            t!(f32x2, f32, 32 - 23);
            t!(f32x4, f32, 32 - 23);
            t!(f32x8, f32, 32 - 23);
            t!(f32x16, f32, 32 - 23);
            t!(f64x2, f64, 64 - 52);
            t!(f64x4, f64, 64 - 52);
            t!(f64x8, f64, 64 - 52);
        }
    }

    #[test]
    fn test_float_overflow() {
        assert_eq!(Uniform::try_from(f64::MIN..f64::MAX), Err(Error::NonFinite));
    }

    #[test]
    #[should_panic]
    fn test_float_overflow_single() {
        let mut rng = crate::test::rng(252);
        rng.random_range(f64::MIN..f64::MAX);
    }

    #[test]
    #[cfg(all(feature = "std", panic = "unwind"))]
    fn test_float_assertions() {
        use super::SampleUniform;
        fn range<T: SampleUniform>(low: T, high: T) -> Result<T, Error> {
            let mut rng = crate::test::rng(253);
            T::Sampler::sample_single(low, high, &mut rng)
        }

        macro_rules! t {
            ($ty:ident, $f_scalar:ident) => {{
                let v: &[($f_scalar, $f_scalar)] = &[
                    ($f_scalar::NAN, 0.0),
                    (1.0, $f_scalar::NAN),
                    ($f_scalar::NAN, $f_scalar::NAN),
                    (1.0, 0.5),
                    ($f_scalar::MAX, -$f_scalar::MAX),
                    ($f_scalar::INFINITY, $f_scalar::INFINITY),
                    ($f_scalar::NEG_INFINITY, $f_scalar::NEG_INFINITY),
                    ($f_scalar::NEG_INFINITY, 5.0),
                    (5.0, $f_scalar::INFINITY),
                    ($f_scalar::NAN, $f_scalar::INFINITY),
                    ($f_scalar::NEG_INFINITY, $f_scalar::NAN),
                    ($f_scalar::NEG_INFINITY, $f_scalar::INFINITY),
                ];
                for &(low_scalar, high_scalar) in v.iter() {
                    for lane in 0..<$ty>::LEN {
                        let low = <$ty>::splat(0.0 as $f_scalar).replace(lane, low_scalar);
                        let high = <$ty>::splat(1.0 as $f_scalar).replace(lane, high_scalar);
                        assert!(range(low, high).is_err());
                        assert!(Uniform::new(low, high).is_err());
                        assert!(Uniform::new_inclusive(low, high).is_err());
                        assert!(Uniform::new(low, low).is_err());
                    }
                }
            }};
        }

        t!(f32, f32);
        t!(f64, f64);
        #[cfg(feature = "simd_support")]
        {
            t!(f32x2, f32);
            t!(f32x4, f32);
            t!(f32x8, f32);
            t!(f32x16, f32);
            t!(f64x2, f64);
            t!(f64x4, f64);
            t!(f64x8, f64);
        }
    }

    #[test]
    fn test_uniform_from_std_range() {
        let r = Uniform::try_from(2.0f64..7.0).unwrap();
        assert_eq!(r.0.low, 2.0);
        assert_eq!(r.0.scale, 5.0);
    }

    #[test]
    fn test_uniform_from_std_range_bad_limits() {
        #![allow(clippy::reversed_empty_ranges)]
        assert!(Uniform::try_from(100.0..10.0).is_err());
        assert!(Uniform::try_from(100.0..100.0).is_err());
    }

    #[test]
    fn test_uniform_from_std_range_inclusive() {
        let r = Uniform::try_from(2.0f64..=7.0).unwrap();
        assert_eq!(r.0.low, 2.0);
        assert!(r.0.scale > 5.0);
        assert!(r.0.scale < 5.0 + 1e-14);
    }

    #[test]
    fn test_uniform_from_std_range_inclusive_bad_limits() {
        #![allow(clippy::reversed_empty_ranges)]
        assert!(Uniform::try_from(100.0..=10.0).is_err());
        assert!(Uniform::try_from(100.0..=99.0).is_err());
    }
}