diff --git a/stl/inc/random b/stl/inc/random
index 4d684d36c1..b832a9bc19 100644
--- a/stl/inc/random
+++ b/stl/inc/random
@@ -238,19 +238,44 @@ private:
     vector<unsigned int> _Myvec;
 };
 
+_NODISCARD constexpr int _Generate_canonical_iterations(const int _Bits, const uint64_t _Gmin, const uint64_t _Gmax) {
+    // For a URBG type `G` with range == `(G::max() - G::min()) + 1`, returns the number of calls to generate at least
+    // _Bits bits of entropy. Specifically, max(1, ceil(_Bits / log2(range))).
+
+    _STL_INTERNAL_CHECK(0 <= _Bits && _Bits <= 64);
+
+    if (_Bits == 0 || (_Gmax == UINT64_MAX && _Gmin == 0)) {
+        return 1;
+    }
+
+    const auto _Range  = (_Gmax - _Gmin) + 1;
+    const auto _Target = ~uint64_t{0} >> (64 - _Bits);
+    uint64_t _Prod     = 1;
+    int _Ceil          = 0;
+    while (_Prod <= _Target) {
+        ++_Ceil;
+        if (_Prod > UINT64_MAX / _Range) {
+            break;
+        }
+
+        _Prod *= _Range;
+    }
+
+    return _Ceil;
+}
+
 template <class _Real, size_t _Bits, class _Gen>
 _NODISCARD _Real generate_canonical(_Gen& _Gx) { // build a floating-point value from random sequence
     _RNG_REQUIRE_REALTYPE(generate_canonical, _Real);
 
-    const size_t _Digits  = static_cast<size_t>(numeric_limits<_Real>::digits);
-    const size_t _Minbits = _Digits < _Bits ? _Digits : _Bits;
+    constexpr auto _Digits  = static_cast<size_t>(numeric_limits<_Real>::digits);
+    constexpr auto _Minbits = static_cast<int>(_Digits < _Bits ? _Digits : _Bits);
 
-    const _Real _Gxmin = static_cast<_Real>((_Gx.min)());
-    const _Real _Gxmax = static_cast<_Real>((_Gx.max)());
-    const _Real _Rx    = (_Gxmax - _Gxmin) + _Real{1};
+    constexpr auto _Gxmin = static_cast<_Real>((_Gen::min)());
+    constexpr auto _Gxmax = static_cast<_Real>((_Gen::max)());
+    constexpr auto _Rx    = (_Gxmax - _Gxmin) + _Real{1};
 
-    const int _Ceil = static_cast<int>(_STD ceil(static_cast<_Real>(_Minbits) / _STD log2(_Rx)));
-    const int _Kx   = _Ceil < 1 ? 1 : _Ceil;
+    constexpr int _Kx = _Generate_canonical_iterations(_Minbits, (_Gen::min)(), (_Gen::max)());
 
     _Real _Ans{0};
     _Real _Factor{1};
@@ -263,7 +288,45 @@ _NODISCARD _Real generate_canonical(_Gen& _Gx) { // build a floating-point value
     return _Ans / _Factor;
 }
 
-#define _NRAND(eng, resty) (_STD generate_canonical<resty, static_cast<size_t>(-1)>(eng))
+template <class _Gen, class = void>
+struct _Has_static_min_max : false_type {};
+
+// This checks a requirement of N4901 [rand.req.urng] `concept uniform_random_bit_generator` but doesn't attempt
+// to implement the whole concept - we just need to distinguish Standard machinery from tr1 machinery.
+template <class _Gen>
+struct _Has_static_min_max<_Gen, void_t<decltype(bool_constant<(_Gen::min)() < (_Gen::max)()>::value)>> : true_type {};
+
+template <class _Real, class _Gen>
+_NODISCARD _Real _Nrand_impl(_Gen& _Gx) { // build a floating-point value from random sequence
+    _RNG_REQUIRE_REALTYPE(_Nrand_impl, _Real);
+
+    constexpr auto _Digits  = static_cast<size_t>(numeric_limits<_Real>::digits);
+    constexpr auto _Bits    = ~size_t{0};
+    constexpr auto _Minbits = _Digits < _Bits ? _Digits : _Bits;
+
+    if constexpr (_Has_static_min_max<_Gen>::value && _Minbits <= 64) {
+        return _STD generate_canonical<_Real, _Minbits>(_Gx);
+    } else { // TRANSITION, for tr1 machinery only; Standard machinery can call generate_canonical directly
+        const _Real _Gxmin = static_cast<_Real>((_Gx.min)());
+        const _Real _Gxmax = static_cast<_Real>((_Gx.max)());
+        const _Real _Rx    = (_Gxmax - _Gxmin) + _Real{1};
+
+        const int _Ceil = static_cast<int>(_STD ceil(static_cast<_Real>(_Minbits) / _STD log2(_Rx)));
+        const int _Kx   = _Ceil < 1 ? 1 : _Ceil;
+
+        _Real _Ans{0};
+        _Real _Factor{1};
+
+        for (int _Idx = 0; _Idx < _Kx; ++_Idx) { // add in another set of bits
+            _Ans += (static_cast<_Real>(_Gx()) - _Gxmin) * _Factor;
+            _Factor *= _Rx;
+        }
+
+        return _Ans / _Factor;
+    }
+}
+
+#define _NRAND(eng, resty) (_Nrand_impl<resty>(eng))
 
 _INLINE_VAR constexpr int _MP_len = 5;
 using _MP_arr = uint64_t[_MP_len];
diff --git a/tests/std/test.lst b/tests/std/test.lst
index 9296abdddd..152e1e4ff0 100644
--- a/tests/std/test.lst
+++ b/tests/std/test.lst
@@ -186,6 +186,7 @@ tests\GH_001638_dllexport_derived_classes
 tests\GH_001850_clog_tied_to_cout
 tests\GH_001858_iostream_exception
 tests\GH_001914_cached_position
+tests\GH_001964_constexpr_generate_canonical
 tests\GH_002030_asan_annotate_vector
 tests\GH_002039_byte_is_not_trivially_swappable
 tests\GH_002058_debug_iterator_race
diff --git a/tests/std/tests/GH_001964_constexpr_generate_canonical/env.lst b/tests/std/tests/GH_001964_constexpr_generate_canonical/env.lst
new file mode 100644
index 0000000000..19f025bd0e
--- /dev/null
+++ b/tests/std/tests/GH_001964_constexpr_generate_canonical/env.lst
@@ -0,0 +1,4 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+RUNALL_INCLUDE ..\usual_matrix.lst
diff --git a/tests/std/tests/GH_001964_constexpr_generate_canonical/test.cpp b/tests/std/tests/GH_001964_constexpr_generate_canonical/test.cpp
new file mode 100644
index 0000000000..b8d40cb9f1
--- /dev/null
+++ b/tests/std/tests/GH_001964_constexpr_generate_canonical/test.cpp
@@ -0,0 +1,64 @@
+// Copyright (c) Microsoft Corporation.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include <algorithm>
+#include <cassert>
+#include <cmath>
+#include <cstdint>
+#include <random>
+#include <utility>
+
+using namespace std;
+
+#define STATIC_ASSERT(...) static_assert(__VA_ARGS__, #__VA_ARGS__)
+
+int naive_iterations(const int bits, const uint64_t gmin, const uint64_t gmax) {
+    // Naive implementation of [rand.util.canonical]. Note that for large values of range, it's possible that
+    // log2(range) == bits when range < 2^bits. This can lead to incorrect results, so we can't use this function as
+    // a reference for all values.
+
+    const double range = static_cast<double>(gmax) - static_cast<double>(gmin) + 1.0;
+    return max(1, static_cast<int>(ceil(static_cast<double>(bits) / log2(range))));
+}
+
+void test(const int target_bits) {
+    // Increase the range until the number of iterations repeats.
+    uint64_t range = 2;
+    int k          = 0;
+    int prev_k     = -1;
+    while (k != prev_k) {
+        prev_k       = exchange(k, naive_iterations(target_bits, 1, range));
+        const int k1 = _Generate_canonical_iterations(target_bits, 1, range);
+        assert(k == k1);
+        ++range;
+    }
+
+    // Now only check the crossover points, where incrementing the range actually causes the number of iterations to
+    // increase.
+    while (--k != 0) {
+        // The largest range such that k iterations generating [1,range] produces less than target_bits bits.
+        if (k == 1) {
+            range = ~uint64_t{0} >> (64 - target_bits);
+        } else {
+            range = static_cast<uint64_t>(ceil(pow(2.0, static_cast<double>(target_bits) / k))) - 1;
+        }
+
+        int k0 = (k == 1) ? 2 : naive_iterations(target_bits, 1, range);
+        int k1 = _Generate_canonical_iterations(target_bits, 1, range);
+        assert(k0 == k1);
+        assert(k1 == k + 1);
+
+        k0 = (k == 1) ? 1 : naive_iterations(target_bits, 0, range);
+        k1 = _Generate_canonical_iterations(target_bits, 0, range);
+        assert(k0 == k1);
+        assert(k1 == k);
+    }
+}
+
+int main() {
+    STATIC_ASSERT(_Generate_canonical_iterations(53, 1, uint64_t{1} << 32) == 2);
+    STATIC_ASSERT(_Generate_canonical_iterations(64, 0, ~uint64_t{0}) == 1);
+
+    for (int bits = 0; bits <= 64; ++bits) {
+        test(bits);
+    }
+}