template <template <typename> typename FriendAllocatorT>
static constexpr size_type block_index_mask = (one << block_offset_shift) - 1;

static constexpr size_type block_index(size_type pos) { return pos >> block_offset_shift; }

static constexpr size_type block_offset(size_type pos) { return pos & block_index_mask; }
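// Example (assuming 64-bit blocks, i.e. block_offset_shift == 6): bit 70 maps
// to block_index(70) == 1 and block_offset(70) == 6, so it lives in
// m_blocks[1] under the mask (one << 6).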
template <typename BlockT>
   requires std::same_as<block_type, std::remove_cv_t<BlockT>>

constexpr bitref_base(std::span<BlockT> blocks, size_type pos) noexcept :
      m_block(blocks[block_index(pos)]), m_mask(one << block_offset(pos)) {}
bitref_base() = delete;
bitref_base(const bitref_base&) noexcept = default;
bitref_base(bitref_base&&) noexcept = default;
bitref_base& operator=(const bitref_base&) = delete;
bitref_base& operator=(bitref_base&&) = delete;

~bitref_base() = default;
constexpr operator bool() const noexcept { return is_set(); }

constexpr bool is_set() const noexcept { return (m_block & m_mask) > 0; }
template <std::integral T>
constexpr T as() const noexcept {
   return static_cast<T>(is_set());
}

constexpr CT::Choice as_choice() const noexcept {
template <typename BlockT>

using bitref_base<BlockT>::bitref_base;
template <typename BlockT>
   requires(!std::is_const_v<BlockT>)

using bitref_base<BlockT>::bitref_base;
// set the referenced bit
this->m_block |= this->m_mask;

// clear the referenced bit
this->m_block &= ~this->m_mask;

// flip the referenced bit
this->m_block ^= this->m_mask;
// constant-time &= with a bool: clears the referenced bit iff `other` is false
this->m_block &= ~CT::Mask<BlockT>::expand(other).if_not_set_return(this->m_mask);
bitvector_base(std::span<const uint8_t> bytes, std::optional<size_type> bits = std::nullopt) {
…}
bitvector_base(std::initializer_list<block_type> blocks, std::optional<size_type> bits = std::nullopt) :
…}
bool empty() const { return m_bits == 0; }
// XOR-accumulate all blocks into acc
full_range_operation([&](std::unsigned_integral auto block) { acc ^= block; }, *this);

for(size_t i = (sizeof(acc) * 8) >> 1; i > 0; i >>= 1) {

// accumulate the number of set bits of every block
full_range_operation([&](std::unsigned_integral auto block) { acc += ct_popcount(block); }, *this);
template <bitvectorish OutT>
template <bitvectorish OtherT>

return size() == other.size() &&
       full_range_operation([]<std::unsigned_integral BlockT>(BlockT lhs, BlockT rhs) { return lhs == rhs; },
template <bitvectorish OtherT>
bool equals(const OtherT& other) const noexcept {
   return (*this ^ other).none();
}
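// Equality via XOR: two bitvectors of equal length are equal exactly when
// XOR-ing them leaves no bit set.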
void from_bytes(std::span<const uint8_t> bytes, std::optional<size_type> bits = std::nullopt) {
   m_bits = bits.value_or(bytes.size_bytes() * 8);
   BOTAN_ARG_CHECK(m_bits <= bytes.size_bytes() * 8, "not enough data to load so many bits");

   // copy as many whole blocks as possible verbatim
   if(verbatim_blocks > 0) {
      typecast_copy(std::span{m_blocks}.first(verbatim_blocks), bytes.first(verbatim_bytes));
   }

   // copy the remaining bits one by one
   for(size_type i = verbatim_bytes * 8; i < m_bits; ++i) {
      ref(i) = ((bytes[i >> 3] & (uint8_t(1) << (i & 7))) != 0);
   }
…}
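// Example (bit numbering follows the loop above, least significant bit of
// each byte first): loading the single byte 0b0000'0101 yields a bitvector
// with bits 0 and 2 set and bits 1 and 3 through 7 clear.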
std::conditional_t<uses_secure_allocator, secure_vector<uint8_t>, std::vector<uint8_t>>>
BOTAN_ARG_CHECK(bytes_needed <= out.size_bytes(), "Not enough space to render bitvector");

if(verbatim_blocks > 0) {
   typecast_copy(out.first(verbatim_bytes), std::span{m_blocks}.first(verbatim_blocks));
}

for(size_type i = verbatim_bytes * 8; i < m_bits; ++i) {
   out[i >> 3] |= ref(i).template as<uint8_t>() << (i & 7);
}
std::stringstream ss;
const auto new_number_of_blocks = ceil_toblocks(bits);
if(new_number_of_blocks != m_blocks.size()) {
   m_blocks.resize(new_number_of_blocks);
}

const auto i = size();
auto front() const { return ref(0); }
full_range_operation(
   [](std::unsigned_integral auto block) -> decltype(block) { return static_cast<decltype(block)>(~0); },
full_range_operation(
   [](std::unsigned_integral auto block) -> decltype(block) { return static_cast<decltype(block)>(0); },
full_range_operation([](std::unsigned_integral auto block) -> decltype(block) { return ~block; }, *this);
return full_range_operation([](std::unsigned_integral auto block) { return block == 0; }, *this);
return full_range_operation(
   []<std::unsigned_integral BlockT>(BlockT block, BlockT mask) { return block == mask; }, *this);
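// The two-parameter callback also receives a mask of the bits that are
// actually in use, so the final, only partially used block is compared on
// its valid bits only.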
template <bitvectorish OutT = bitvector_base<AllocatorT>>
newvector_unwrapped.m_blocks,
std::span{m_blocks}.subspan(block_index(pos), block_index(pos + bitlen - 1) - block_index(pos) + 1));

BitRangeOperator<const bitvector_base<AllocatorT>, BitRangeAlignment::no_alignment> from_op(

BitRangeOperator<strong_type_wrapped_type<OutT>> to_op(

range_operation([](auto, auto from) { return from; }, to_op, from_op);

newvector_unwrapped.zero_unused_bits();
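// The copy either reuses whole blocks of the source directly (when the
// requested range is suitably aligned) or streams the bits through a pair of
// BitRangeOperators; zero_unused_bits() then clears anything beyond the new
// length.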
template <typename OutT>
   requires(std::unsigned_integral<strong_type_wrapped_type<OutT>> &&
            !std::same_as<bool, strong_type_wrapped_type<OutT>>)

constexpr size_t bits = sizeof(result_t) * 8;

out = load_le<result_t>(std::span{m_blocks}.subspan(block_index(pos)).template first<sizeof(result_t)>());

BitRangeOperator<const bitvector_base<AllocatorT>, BitRangeAlignment::no_alignment> op(*this, pos, bits);

[&](std::unsigned_integral auto integer) {
   if constexpr(std::same_as<result_t, decltype(integer)>) {
template <typename InT>
   requires(std::unsigned_integral<strong_type_wrapped_type<InT>> && !std::same_as<bool, InT>)

constexpr size_t bits = sizeof(in_t) * 8;

store_le(std::span{m_blocks}.subspan(block_index(pos)).template first<sizeof(in_t)>(),

BitRangeOperator<bitvector_base<AllocatorT>, BitRangeAlignment::no_alignment> op(*this, pos, bits);

[&]<std::unsigned_integral BlockT>(BlockT block) -> BlockT {
   if constexpr(std::same_as<in_t, BlockT>) {
template <bitvectorish OtherT>

full_range_operation([]<std::unsigned_integral BlockT>(BlockT lhs, BlockT rhs) -> BlockT { return lhs | rhs; },

template <bitvectorish OtherT>

full_range_operation([]<std::unsigned_integral BlockT>(BlockT lhs, BlockT rhs) -> BlockT { return lhs & rhs; },

template <bitvectorish OtherT>

full_range_operation([]<std::unsigned_integral BlockT>(BlockT lhs, BlockT rhs) -> BlockT { return lhs ^ rhs; },
template <bitvectorish OtherT>

// Note: the following identical-looking statements belong to separate
// callbacks; in each, `m` is a CT::Mask that is all-ones exactly when the
// caller's condition is set, so `rhs` is XORed in conditionally and without
// branching.
return lhs ^ m.if_set_return(rhs);

return lhs ^ m.if_set_return(rhs);

return lhs ^ m.if_set_return(rhs);

return lhs ^ m.if_set_return(rhs);
void zero_unused_bits() {
   const auto first_unused_bit = size();

   // clear the bits in the last in-use block that lie beyond size()
   const block_type mask = (one << block_offset(first_unused_bit)) - one;
   m_blocks[block_index(first_unused_bit)] &= mask;
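// Example (assuming 64-bit blocks): with size() == 70, block_offset(70) == 6,
// so mask == 0x3F and m_blocks[1] keeps only bits 64..69 of the vector.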
auto ref(size_type pos) const { return bitref<const block_type>(m_blocks, pos); }

auto ref(size_type pos) { return bitref<block_type>(m_blocks, pos); }
enum class BitRangeAlignment { byte_aligned, no_alignment };
template <typename BitvectorT, auto alignment = BitRangeAlignment::byte_aligned>
   requires is_bitvector_v<std::remove_cvref_t<BitvectorT>>
class BitRangeOperator {

constexpr static bool is_const() { return std::is_const_v<BitvectorT>; }

struct UnalignedDataHelper {
   const uint8_t padding_bits;            // offset of the range's first bit within its byte
   const uint8_t bits_to_byte_alignment;  // bits remaining until the next byte boundary
};
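// BitRangeOperator maintains separate read and write cursors over a
// (possibly byte-unaligned) bit range of a bitvector and hands that range
// out block by block via load_next()/store_next().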
BitRangeOperator(BitvectorT& source, size_type start_bitoffset, size_type bitlength) :
      m_source(source),
      m_start_bitoffset(start_bitoffset),
      m_bitlength(bitlength),
      m_unaligned_helper({.padding_bits = static_cast<uint8_t>(start_bitoffset % 8),
                          .bits_to_byte_alignment = static_cast<uint8_t>(8 - (start_bitoffset % 8))}),
      m_read_bitpos(start_bitoffset),
      m_write_bitpos(start_bitoffset) {
   BOTAN_ASSERT(is_byte_aligned() == (m_start_bitoffset % 8 == 0), "byte alignment guarantee");
   BOTAN_ASSERT(m_source.size() >= m_start_bitoffset + m_bitlength, "enough bytes in underlying source");
BitRangeOperator(BitvectorT& source) : BitRangeOperator(source, 0, source.size()) {}

static constexpr bool is_byte_aligned() { return alignment == BitRangeAlignment::byte_aligned; }
size_type size() const { return m_bitlength; }

size_type bits_to_read() const { return m_bitlength - m_read_bitpos + m_start_bitoffset; }

size_type bits_to_write() const { return m_bitlength - m_write_bitpos + m_start_bitoffset; }
template <std::unsigned_integral BlockT>
BlockT load_next() const {
   constexpr size_type block_size = sizeof(BlockT);
   constexpr size_type block_bits = block_size * 8;
   const auto bits_remaining = bits_to_read();

   BlockT result_block = 0;
   if constexpr(is_byte_aligned()) {
      result_block = load_le(m_source.as_byte_span().subspan(read_bytepos()).template first<block_size>());
   } else {
      const size_type byte_pos = read_bytepos();
      const size_type bits_to_collect = std::min(block_bits, bits_to_read());

      // the range does not start at a byte boundary: only the upper bits of
      // the first byte belong to it and become the low bits of the result
      const uint8_t first_byte = m_source.as_byte_span()[byte_pos];
      result_block = BlockT(first_byte) >> m_unaligned_helper.padding_bits;

      // pull in the following bytes if the requested block extends past the
      // first byte boundary
      if(m_unaligned_helper.bits_to_byte_alignment < bits_to_collect) {
         const BlockT block =
            load_le(m_source.as_byte_span().subspan(byte_pos + 1).template first<block_size>());
         result_block |= block << m_unaligned_helper.bits_to_byte_alignment;
      }
   }

   m_read_bitpos += std::min(block_bits, bits_remaining);
   return result_block;
template <std::unsigned_integral BlockT>
   requires(!is_const())
void store_next(BlockT block) {
   constexpr size_type block_size = sizeof(BlockT);
   constexpr size_type block_bits = block_size * 8;

   if constexpr(is_byte_aligned()) {
      auto sink = m_source.as_byte_span().subspan(write_bytepos()).template first<block_size>();
      store_le(sink, block);
   } else {
      const size_type byte_pos = write_bytepos();
      const size_type bits_to_store = std::min(block_bits, bits_to_write());

      // preserve the low bits of the first byte (they precede the range) and
      // splice the new block in above them
      uint8_t& first_byte = m_source.as_byte_span()[byte_pos];
      first_byte = (first_byte & uint8_t(0xFF >> m_unaligned_helper.bits_to_byte_alignment)) |
                   uint8_t(block << m_unaligned_helper.padding_bits);

      // write the remainder of the block into the following bytes, keeping
      // the bits it does not cover intact
      if(m_unaligned_helper.bits_to_byte_alignment < bits_to_store) {
         const auto remaining_bytes =
            m_source.as_byte_span().subspan(byte_pos + 1).template first<block_size>();
         const BlockT padding_mask = ~(BlockT(-1) >> m_unaligned_helper.bits_to_byte_alignment);
         const BlockT new_bytes =
            (load_le(remaining_bytes) & padding_mask) | block >> m_unaligned_helper.bits_to_byte_alignment;
         store_le(remaining_bytes, new_bytes);
      }
   }

   m_write_bitpos += std::min(block_bits, bits_to_write());
template <std::unsigned_integral BlockT>
   requires(is_byte_aligned() && !is_const())
std::span<BlockT> span(size_type blocks) const {

   void* ptr = reinterpret_cast<void*>(m_source.as_byte_span().data() + read_bytepos());
   return {reinterpret_cast<BlockT*>(ptr), blocks};
template <std::unsigned_integral BlockT>
   requires(is_byte_aligned() && is_const())
std::span<const BlockT> span(size_type blocks) const {

   const void* ptr = reinterpret_cast<const void*>(m_source.as_byte_span().data() + read_bytepos());
   return {reinterpret_cast<const BlockT*>(ptr), blocks};
requires(is_byte_aligned())
void advance(size_type bytes) {
   m_read_bitpos += bytes * 8;
   m_write_bitpos += bytes * 8;
template <std::unsigned_integral BlockT>
   requires(is_byte_aligned())
bool is_memory_aligned_to() const {
   const void* cptr = m_source.as_byte_span().data() + read_bytepos();
   const void* ptr_before = cptr;

   // std::align() moves the pointer only if it is misaligned; if the result
   // still equals the original pointer, it was already aligned for BlockT
   void* ptr = const_cast<void*>(cptr);
   size_t size = sizeof(BlockT);
   return ptr_before != nullptr && std::align(alignof(BlockT), size, ptr, size) == ptr_before;
size_type read_bytepos() const { return m_read_bitpos / 8; }

size_type write_bytepos() const { return m_write_bitpos / 8; }
BitvectorT& m_source;

UnalignedDataHelper m_unaligned_helper;
template <typename FnT, typename... ParamTs>
   requires detail::blockwise_processing_callback<FnT, ParamTs...>
class blockwise_processing_callback_trait {

constexpr static bool needs_mask = detail::blockwise_processing_callback_with_mask<FnT, ParamTs...>;
constexpr static bool is_manipulator = detail::manipulating_blockwise_processing_callback<FnT, ParamTs...>;
constexpr static bool is_predicate = detail::predicate_blockwise_processing_callback<FnT, ParamTs...>;
static_assert(!is_manipulator || !is_predicate, "cannot be manipulator and predicate at the same time");
template <std::unsigned_integral... BlockTs>
   requires(all_same_v<std::remove_cv_t<BlockTs>...> && sizeof...(BlockTs) == sizeof...(ParamTs))
constexpr static bool apply_on_full_blocks(FnT fn, std::span<BlockTs>... blocks) {

   const size_type iterations = detail::first(blocks...).size();
   for(size_type i = 0; i < iterations; ++i) {
      if constexpr(is_predicate) {
         if(!apply(fn, bits, blocks[i]...)) {
            return false;
         }
      } else if constexpr(is_manipulator) {
         detail::first(blocks...)[i] = apply(fn, bits, blocks[i]...);
      } else {
         apply(fn, bits, blocks[i]...);
template <std::unsigned_integral BlockT, typename... BitRangeOperatorTs>
   requires(sizeof...(BitRangeOperatorTs) == sizeof...(ParamTs))
constexpr static bool apply_on_unaligned_blocks(FnT fn, BitRangeOperatorTs&... ops) {
   constexpr size_type block_bits = sizeof(BlockT) * 8;
   auto bits = detail::first(ops...).bits_to_read();

   while(bits > block_bits * 2 - 8) {

      if constexpr(is_predicate) {
         if(!apply(fn, bits, ops.template load_next<BlockT>()...)) {
            return false;
         }
      } else if constexpr(is_manipulator) {
         detail::first(ops...).store_next(apply(fn, bits, ops.template load_next<BlockT>()...));
      } else {
         apply(fn, bits, ops.template load_next<BlockT>()...);
template <std::unsigned_integral... BlockTs>
   requires(all_same_v<BlockTs...>)
constexpr static auto apply(FnT fn, size_type bits, BlockTs... blocks) {
   if constexpr(needs_mask) {

   return fn(blocks...);
template <std::unsigned_integral BlockT, typename FnT, typename... BitRangeOperatorTs>
   requires(detail::blockwise_processing_callback<FnT, BitRangeOperatorTs...> &&
            sizeof...(BitRangeOperatorTs) > 0)
static bool _process_in_fully_aligned_blocks_of(FnT fn, BitRangeOperatorTs&... ops) {
   constexpr size_type block_bytes = sizeof(BlockT);
   constexpr size_type block_bits = block_bytes * 8;
   const size_type blocks = detail::first(ops...).bits_to_read() / block_bits;

   using callback_trait = blockwise_processing_callback_trait<FnT, BitRangeOperatorTs...>;
   const auto result = callback_trait::apply_on_full_blocks(fn, ops.template span<BlockT>(blocks)...);
   // advance every operator past the blocks that were just processed
   (ops.advance(block_bytes * blocks), ...);
template <std::unsigned_integral BlockT, typename FnT, typename... BitRangeOperatorTs>
   requires(detail::blockwise_processing_callback<FnT, BitRangeOperatorTs...>)
static bool _process_in_unaligned_blocks_of(FnT fn, BitRangeOperatorTs&... ops) {
   using callback_trait = blockwise_processing_callback_trait<FnT, BitRangeOperatorTs...>;
   return callback_trait::template apply_on_unaligned_blocks<BlockT>(fn, ops...);
template <typename FnT, typename... BitRangeOperatorTs>
   requires(detail::blockwise_processing_callback<FnT, BitRangeOperatorTs...> &&
            sizeof...(BitRangeOperatorTs) > 0)
static bool range_operation(FnT fn, BitRangeOperatorTs... ops) {
   BOTAN_ASSERT(has_equal_lengths(ops...), "all BitRangeOperators have the same length");

   if constexpr((BitRangeOperatorTs::is_byte_aligned() && ...)) {

      const bool alignment = (ops.template is_memory_aligned_to<uint64_t>() && ...);

      // process as much as possible in wide, fully aligned blocks and mop up
      // the remainder with ever narrower block types
      return _process_in_fully_aligned_blocks_of<uint64_t>(fn, ops...) &&
             _process_in_fully_aligned_blocks_of<uint32_t>(fn, ops...) &&
             _process_in_fully_aligned_blocks_of<uint16_t>(fn, ops...) &&
             _process_in_unaligned_blocks_of<uint8_t>(fn, ops...);

   return _process_in_unaligned_blocks_of<uint64_t>(fn, ops...) &&
          _process_in_unaligned_blocks_of<uint32_t>(fn, ops...) &&
          _process_in_unaligned_blocks_of<uint16_t>(fn, ops...) &&
          _process_in_unaligned_blocks_of<uint8_t>(fn, ops...);
template <typename FnT, typename... BitvectorTs>
   requires(detail::blockwise_processing_callback<FnT, BitvectorTs...> &&
            (is_bitvector_v<std::remove_cvref_t<BitvectorTs>> && ... && true))
static bool full_range_operation(FnT&& fn, BitvectorTs&... bitvecs) {
   BOTAN_ASSERT(has_equal_lengths(bitvecs...), "all bitvectors have the same length");
   return range_operation(std::forward<FnT>(fn), BitRangeOperator<BitvectorTs>(bitvecs)...);
}
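// Example (mirroring the in-place operators above): a manipulating callback
// such as
//    full_range_operation([]<std::unsigned_integral BlockT>(BlockT lhs, BlockT rhs) -> BlockT { return lhs | rhs; }, *this, other);
// visits both bitvectors block by block and writes the result back into the
// blocks of the first operand.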
template <typename SomeT, typename... SomeTs>
static bool has_equal_lengths(const SomeT& v, const SomeTs&... vs) {
   return ((v.size() == vs.size()) && ... && true);
}
template <std::unsigned_integral T>
static constexpr T make_mask(size_type bits) {
   // mask with the low `bits` bits set; saturates to all-ones once `bits`
   // covers the whole type
   const bool max = bits >= sizeof(T) * 8;

   return (T(!max) << bits) - 1;
}
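// e.g. make_mask<uint8_t>(3) == 0b0000'0111 and make_mask<uint8_t>(8) == 0xFF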
auto as_byte_span() { return std::span{reinterpret_cast<uint8_t*>(m_blocks.data()), m_blocks.size() * sizeof(block_type)}; }

auto as_byte_span() const { return std::span{reinterpret_cast<const uint8_t*>(m_blocks.data()), m_blocks.size() * sizeof(block_type)}; }
std::vector<block_type, allocator_type> m_blocks;