template <template <typename> typename FriendAllocatorT>
friend class bitvector_base;
static constexpr size_type block_index_mask = (one << block_offset_shift) - 1;

static constexpr size_type block_index(size_type pos) { return pos >> block_offset_shift; }

static constexpr size_type block_offset(size_type pos) { return pos & block_index_mask; }
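// Illustrative arithmetic, assuming 8-bit blocks (block_offset_shift == 3,
// block_index_mask == 0b111): pos = 13 maps to block_index(13) == 1 and
// block_offset(13) == 5, i.e. bit 13 lives in m_blocks[1] under the mask
// (one << 5).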
template <typename BlockT>
   requires std::same_as<block_type, std::remove_cv_t<BlockT>>
constexpr bitref_base(std::span<BlockT> blocks, size_type pos) noexcept :
      m_block(blocks[block_index(pos)]), m_mask(one << block_offset(pos)) {}
bitref_base() = delete;
bitref_base(const bitref_base&) noexcept = default;
bitref_base(bitref_base&&) noexcept = default;
bitref_base& operator=(const bitref_base&) = delete;
bitref_base& operator=(bitref_base&&) = delete;

~bitref_base() = default;
constexpr operator bool() const noexcept { return is_set(); }

constexpr bool is_set() const noexcept { return (m_block & m_mask) > 0; }
template <std::unsigned_integral T>
constexpr T as() const noexcept {
   return static_cast<T>(is_set());
}

constexpr CT::Choice as_choice() const noexcept {
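// Usage sketch (assuming the enclosing bitvector exposes the usual
// operator[] returning one of the bitref specializations below): a bitref
// over a const block only supports reading, while the specialization for
// mutable blocks adds the mutating operations further down.
//
//    const auto& cbv = bv;
//    if(cbv[7]) { /* read through the const bitref */ }
//    bv[7] = true;  // write through the mutable bitref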
template <typename BlockT>
class bitref final : public bitref_base<BlockT> {
   public:
      using bitref_base<BlockT>::bitref_base;
};
template <typename BlockT>
   requires(!std::is_const_v<BlockT>)
class bitref<BlockT> final : public bitref_base<BlockT> {
   public:
      using bitref_base<BlockT>::bitref_base;
this->m_block |= this->m_mask;   // raise the referenced bit (set)
this->m_block &= ~this->m_mask;  // clear the referenced bit (unset)
this->m_block ^= this->m_mask;   // toggle the referenced bit (flip)
this->m_block &= ~CT::Mask<BlockT>::expand(other).if_not_set_return(this->m_mask);
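// Branch-free conditional clear: CT::Mask<BlockT>::expand(other) is all-ones
// when `other` is true and all-zeros otherwise, and if_not_set_return(m_mask)
// yields m_mask only in the all-zeros case. The AND with the complement thus
// clears the referenced bit exactly when `other` is false, without a
// data-dependent branch (this is the shape of a constant-time operator&=).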
bitvector_base(std::span<const uint8_t> bytes, std::optional<size_type> bits = std::nullopt) :

bitvector_base(std::initializer_list<block_type> blocks, std::optional<size_type> bits = std::nullopt) :
bool empty() const { return m_bits == 0; }
full_range_operation([&](std::unsigned_integral auto block) { acc ^= block; }, *this);

for(size_t i = (sizeof(acc) * 8) >> 1; i > 0; i >>= 1) {
full_range_operation([&](std::unsigned_integral auto block) { acc += ct_popcount(block); }, *this);
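// Hamming weight: each block contributes its constant-time popcount, e.g.
// blocks {0b1011, 0b0001} accumulate to 4.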
template <bitvectorish OutT>

template <bitvectorish OtherT>

return size() == other.size() &&
       full_range_operation(
          []<std::unsigned_integral BlockT>(BlockT lhs, BlockT rhs) { return lhs == rhs; },
template <bitvectorish OtherT>
bool equals(const OtherT& other) const noexcept {
   return (*this ^ other).none();
}
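// Unlike the early-exit block comparison above, XOR-ing the vectors first
// forces every block to be touched; whether the overall check is fully
// timing-neutral then depends on how none() inspects the XOR result (an
// early-exit none() would still leak the position of the first difference).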
void from_bytes(std::span<const uint8_t> bytes, std::optional<size_type> bits = std::nullopt) {
   m_bits = bits.value_or(bytes.size_bytes() * 8);
   BOTAN_ARG_CHECK(m_bits <= bytes.size_bytes() * 8, "not enough data to load so many bits");

   if(verbatim_blocks > 0) {
      typecast_copy(std::span{m_blocks}.first(verbatim_blocks), bytes.first(verbatim_bytes));
   }

   for(size_type i = verbatim_bytes * 8; i < m_bits; ++i) {
      ref(i) = ((bytes[i >> 3] & (uint8_t(1) << (i & 7))) != 0);
   }
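// Bit addressing: bit i of the input lives in byte i >> 3 at LSB-first
// position i & 7, so bytes = {0x05} sets bits 0 and 2. Fully covered blocks
// take the typecast_copy fast path; only the trailing partial byte is
// transferred bit by bit.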
std::conditional_t<uses_secure_allocator, secure_vector<uint8_t>, std::vector<uint8_t>>>

BOTAN_ARG_CHECK(bytes_needed <= out.size_bytes(), "Not enough space to render bitvector");
if(verbatim_blocks > 0) {
   typecast_copy(out.first(verbatim_bytes), std::span{m_blocks}.first(verbatim_blocks));
}

for(size_type i = verbatim_bytes * 8; i < m_bits; ++i) {
   out[i >> 3] |= ref(i).template as<uint8_t>() << (i & 7);
}
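// Exact inverse of the from_bytes() mapping above: provided `out` is
// zero-initialized, the tail loop only ORs bits in, and a subsequent
// from_bytes() of the rendered bytes reproduces the original vector.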
std::stringstream ss;

const auto new_number_of_blocks = ceil_toblocks(bits);
if(new_number_of_blocks != m_blocks.size()) {
   m_blocks.resize(new_number_of_blocks);
}

const auto i = size();
auto front() const { return ref(0); }
full_range_operation(
   [](std::unsigned_integral auto block) -> decltype(block) {
      return static_cast<decltype(block)>(~static_cast<decltype(block)>(0));
   },
   *this);
full_range_operation(
   [](std::unsigned_integral auto block) -> decltype(block) { return static_cast<decltype(block)>(0); },
   *this);
full_range_operation([](std::unsigned_integral auto block) -> decltype(block) { return ~block; }, *this);
return full_range_operation([](std::unsigned_integral auto block) { return block == 0; }, *this);
return full_range_operation(
   []<std::unsigned_integral BlockT>(BlockT block, BlockT mask) { return block == mask; }, *this);
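// This two-parameter predicate is "mask-aware": for full blocks the mask is
// all-ones, while the final partial block is compared against make_mask()
// of the remaining bits, so the zeroed spare bits at the top of the last
// block cannot make the all-bits-set test fail spuriously.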
template <bitvectorish OutT = bitvector_base<AllocatorT>>

newvector_unwrapped.m_blocks,
   std::span{m_blocks}.subspan(block_index(pos), block_index(pos + bitlen - 1) - block_index(pos) + 1));
BitRangeOperator<const bitvector_base<AllocatorT>, BitRangeAlignment::no_alignment> from_op(

BitRangeOperator<strong_type_wrapped_type<OutT>> to_op(

range_operation([](auto /* to */, auto from) { return from; }, to_op, from_op);

newvector_unwrapped.zero_unused_bits();
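// Sketch of the two subvector() paths: byte-aligned ranges are bulk-copied
// block-wise (the subspan copy above), anything else is funneled through a
// pair of BitRangeOperators where the identity callback forwards each block
// read from `from_op` into `to_op`. Illustrative call, assuming the public
// signature takes a start bit and a length in bits:
//
//    auto piece = bv.subvector(3, 10);  // bits [3, 13) of bv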
template <typename OutT>
   requires(std::unsigned_integral<strong_type_wrapped_type<OutT>> &&
            !std::same_as<bool, strong_type_wrapped_type<OutT>>)

constexpr size_t bits = sizeof(result_t) * 8;
out = load_le<result_t>(std::span{m_blocks}.subspan(block_index(pos)).template first<sizeof(result_t)>());

BitRangeOperator<const bitvector_base<AllocatorT>, BitRangeAlignment::no_alignment> op(*this, pos, bits);

[&](std::unsigned_integral auto integer) {
   if constexpr(std::same_as<result_t, decltype(integer)>) {
template <typename InT>
   requires(std::unsigned_integral<strong_type_wrapped_type<InT>> && !std::same_as<bool, InT>)

constexpr size_t bits = sizeof(in_t) * 8;

store_le(std::span{m_blocks}.subspan(block_index(pos)).template first<sizeof(in_t)>(),

BitRangeOperator<bitvector_base<AllocatorT>, BitRangeAlignment::no_alignment> op(*this, pos, bits);

[&]<std::unsigned_integral BlockT>(BlockT block) -> BlockT {
   if constexpr(std::same_as<in_t, BlockT>) {
template <bitvectorish OtherT>

full_range_operation([]<std::unsigned_integral BlockT>(BlockT lhs, BlockT rhs) -> BlockT { return lhs | rhs; },

template <bitvectorish OtherT>

full_range_operation([]<std::unsigned_integral BlockT>(BlockT lhs, BlockT rhs) -> BlockT { return lhs & rhs; },

template <bitvectorish OtherT>

full_range_operation([]<std::unsigned_integral BlockT>(BlockT lhs, BlockT rhs) -> BlockT { return lhs ^ rhs; },
template <bitvectorish OtherT>

return lhs ^ m.if_set_return(rhs);

return lhs ^ m.if_set_return(rhs);

return lhs ^ m.if_set_return(rhs);

return lhs ^ m.if_set_return(rhs);
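// One overload per block width presumably sits behind these four identical
// returns, each capturing a CT::Mask `m` of its width derived from the same
// caller-supplied condition. if_set_return(rhs) is rhs when the mask is set
// and 0 otherwise, so every block becomes lhs ^ (condition ? rhs : 0)
// without the condition ever influencing control flow.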
void zero_unused_bits() {
   const auto first_unused_bit = size();

   // nothing to clear when size() falls on a block boundary (the guard
   // also keeps block_index() within m_blocks)
   if(block_offset(first_unused_bit) > 0) {
      const block_type mask = (one << block_offset(first_unused_bit)) - one;
      m_blocks[block_index(first_unused_bit)] &= mask;
   }
}
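// Example with 8-bit blocks and size() == 13: block_offset(13) == 5, so
// mask == (1 << 5) - 1 == 0x1F and the three spare top bits of m_blocks[1]
// are cleared. Keeping spare bits at zero is what lets whole-block popcount,
// equality and the masked all-bits-set check trust every block verbatim.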
enum class BitRangeAlignment : uint8_t { byte_aligned, no_alignment };

template <typename BitvectorT, auto alignment = BitRangeAlignment::byte_aligned>
   requires is_bitvector_v<std::remove_cvref_t<BitvectorT>>
class BitRangeOperator {

constexpr static bool is_const() { return std::is_const_v<BitvectorT>; }
struct UnalignedDataHelper {
   const uint8_t padding_bits;
   const uint8_t bits_to_byte_alignment;
};
BitRangeOperator(BitvectorT& source, size_type start_bitoffset, size_type bitlength) :
      m_source(source),
      m_start_bitoffset(start_bitoffset),
      m_bitlength(bitlength),
      m_unaligned_helper({.padding_bits = static_cast<uint8_t>(start_bitoffset % 8),
                          .bits_to_byte_alignment = static_cast<uint8_t>(8 - (start_bitoffset % 8))}),
      m_read_bitpos(start_bitoffset),
      m_write_bitpos(start_bitoffset) {
   BOTAN_ASSERT(is_byte_aligned() == (m_start_bitoffset % 8 == 0), "byte alignment guarantee");
   BOTAN_ASSERT(m_source.size() >= m_start_bitoffset + m_bitlength, "enough bits in underlying source");
}
explicit BitRangeOperator(BitvectorT& source) : BitRangeOperator(source, 0, source.size()) {}

static constexpr bool is_byte_aligned() { return alignment == BitRangeAlignment::byte_aligned; }

size_type bits_to_read() const { return m_bitlength - m_read_bitpos + m_start_bitoffset; }

size_type bits_to_write() const { return m_bitlength - m_write_bitpos + m_start_bitoffset; }
template <std::unsigned_integral BlockT>
BlockT load_next() const {
   constexpr size_type block_size = sizeof(BlockT);
   constexpr size_type block_bits = block_size * 8;
   const auto bits_remaining = bits_to_read();

   BlockT result_block = 0;
   if constexpr(is_byte_aligned()) {
      result_block = load_le(m_source.as_byte_span().subspan(read_bytepos()).template first<block_size>());
   } else {
      const size_type byte_pos = read_bytepos();
      const size_type bits_to_collect = std::min(block_bits, bits_to_read());

      const uint8_t first_byte = m_source.as_byte_span()[byte_pos];

      // bits of the first byte that belong to the range, moved down to bit 0
      result_block = BlockT(first_byte) >> m_unaligned_helper.padding_bits;

      // splice in the following bytes if the range extends beyond the first byte
      if(m_unaligned_helper.bits_to_byte_alignment < bits_to_collect) {
         const BlockT block =
            load_le(m_source.as_byte_span().subspan(byte_pos + 1).template first<block_size>());
         result_block |= block << m_unaligned_helper.bits_to_byte_alignment;
      }
   }

   m_read_bitpos += std::min(block_bits, bits_remaining);
   return result_block;
}
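// Worked example with BlockT = uint8_t and start_bitoffset = 13: the first
// unaligned read has byte_pos = 1 and yields
//    (byte[1] >> 5) | (byte[2] << 3)
// i.e. three range bits from byte 1 and five from byte 2, packed LSB-first.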
template <std::unsigned_integral BlockT>
   requires(!is_const())
void store_next(BlockT block) {
   constexpr size_type block_size = sizeof(BlockT);
   constexpr size_type block_bits = block_size * 8;

   if constexpr(is_byte_aligned()) {
      auto sink = m_source.as_byte_span().subspan(write_bytepos()).template first<block_size>();
      store_le(sink, block);
   } else {
      const size_type byte_pos = write_bytepos();
      const size_type bits_to_store = std::min(block_bits, bits_to_write());

      uint8_t& first_byte = m_source.as_byte_span()[byte_pos];

      // keep the low padding bits of the first byte, overwrite the rest
      first_byte = (first_byte & uint8_t(0xFF >> m_unaligned_helper.bits_to_byte_alignment)) |
                   uint8_t(block << m_unaligned_helper.padding_bits);

      if(m_unaligned_helper.bits_to_byte_alignment < bits_to_store) {
         const auto remaining_bytes =
            m_source.as_byte_span().subspan(byte_pos + 1).template first<block_size>();
         const BlockT padding_mask = ~(BlockT(-1) >> m_unaligned_helper.bits_to_byte_alignment);
         const BlockT new_bytes =
            (load_le(remaining_bytes) & padding_mask) | block >> m_unaligned_helper.bits_to_byte_alignment;
         store_le(remaining_bytes, new_bytes);
      }
   }

   m_write_bitpos += std::min(block_bits, bits_to_write());
}
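// store_next() mirrors load_next(): the low padding_bits of the first byte
// and the bits above the stored block in the trailing bytes keep their
// previous contents, so bits outside the operated range are never clobbered.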
template <std::unsigned_integral BlockT>
   requires(is_byte_aligned() && !is_const())
std::span<BlockT> span(size_type blocks) const {
   // reinterpreting the byte buffer as BlockT is only done on the fast path
   // that is gated by is_memory_aligned_to() below
   void* ptr = reinterpret_cast<void*>(m_source.as_byte_span().data() + read_bytepos());
   return {reinterpret_cast<BlockT*>(ptr), blocks};
}

template <std::unsigned_integral BlockT>
   requires(is_byte_aligned() && is_const())
std::span<const BlockT> span(size_type blocks) const {
   const void* ptr = reinterpret_cast<const void*>(m_source.as_byte_span().data() + read_bytepos());
   return {reinterpret_cast<const BlockT*>(ptr), blocks};
}
void advance(size_type bytes)
   requires(is_byte_aligned())
{
   m_read_bitpos += bytes * 8;
   m_write_bitpos += bytes * 8;
}
template <std::unsigned_integral BlockT>
   requires(is_byte_aligned())
bool is_memory_aligned_to() const {
   const void* cptr = m_source.as_byte_span().data() + read_bytepos();
   const void* ptr_before = cptr;

   // std::align() wants a mutable pointer but does not write through it
   void* ptr = const_cast<void*>(cptr);
   size_t size = sizeof(BlockT);
   return ptr_before != nullptr && std::align(alignof(BlockT), size, ptr, size) == ptr_before;
}
size_type read_bytepos() const { return m_read_bitpos / 8; }

size_type write_bytepos() const { return m_write_bitpos / 8; }

BitvectorT& m_source;
size_type m_start_bitoffset;
size_type m_bitlength;

UnalignedDataHelper m_unaligned_helper;

mutable size_type m_read_bitpos;
size_type m_write_bitpos;
template <typename FnT, typename... ParamTs>
   requires detail::blockwise_processing_callback<FnT, ParamTs...>
class blockwise_processing_callback_trait {
   public:
      constexpr static bool needs_mask = detail::blockwise_processing_callback_with_mask<FnT, ParamTs...>;
      constexpr static bool is_manipulator = detail::manipulating_blockwise_processing_callback<FnT, ParamTs...>;
      constexpr static bool is_predicate = detail::predicate_blockwise_processing_callback<FnT, ParamTs...>;
      static_assert(!is_manipulator || !is_predicate, "cannot be manipulator and predicate at the same time");
template <std::unsigned_integral... BlockTs>
   requires(all_same_v<std::remove_cv_t<BlockTs>...> && sizeof...(BlockTs) == sizeof...(ParamTs))
constexpr static bool apply_on_full_blocks(FnT fn, std::span<BlockTs>... blocks) {
   // full blocks always see an all-ones mask, i.e. `bits` covers a whole block
   constexpr size_type bits = sizeof(std::common_type_t<std::remove_cv_t<BlockTs>...>) * 8;
   const size_type iterations = detail::first(blocks...).size();
   for(size_type i = 0; i < iterations; ++i) {
      if constexpr(is_predicate) {
         if(!apply(fn, bits, blocks[i]...)) {
            return false;
         }
      } else if constexpr(is_manipulator) {
         detail::first(blocks...)[i] = apply(fn, bits, blocks[i]...);
      } else {
         apply(fn, bits, blocks[i]...);
      }
   }
   return true;
}
template <std::unsigned_integral BlockT, typename... BitRangeOperatorTs>
   requires(sizeof...(BitRangeOperatorTs) == sizeof...(ParamTs))
constexpr static bool apply_on_unaligned_blocks(FnT fn, BitRangeOperatorTs&... ops) {
   constexpr size_type block_bits = sizeof(BlockT) * 8;
   auto bits = detail::first(ops...).bits_to_read();

   while(bits > block_bits * 2 - 8) {
      if constexpr(is_predicate) {
         if(!apply(fn, bits, ops.template load_next<BlockT>()...)) {
            return false;
         }
      } else if constexpr(is_manipulator) {
         detail::first(ops...).store_next(apply(fn, bits, ops.template load_next<BlockT>()...));
      } else {
         apply(fn, bits, ops.template load_next<BlockT>()...);
      }
      bits = detail::first(ops...).bits_to_read();  // refreshed after each consumed block
   }
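// Note the conservative loop bound: an unaligned load_next() can touch up
// to sizeof(BlockT) bytes starting one byte past the current read position,
// so the loop presumably stops early enough that such a read never runs out
// of bounds; whatever tail remains is handled after the loop and by the
// narrower block widths that range_operation() tries next.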
template <std::unsigned_integral... BlockTs>
   requires(all_same_v<BlockTs...>)
constexpr static auto apply(FnT fn, size_type bits, BlockTs... blocks) {
   if constexpr(needs_mask) {
      // mask-aware callbacks receive a mask of the still-valid bits last
      return fn(blocks..., make_mask<std::common_type_t<BlockTs...>>(bits));
   } else {
      return fn(blocks...);
   }
}
template <std::unsigned_integral BlockT, typename FnT, typename... BitRangeOperatorTs>
   requires(detail::blockwise_processing_callback<FnT, BitRangeOperatorTs...> &&
            sizeof...(BitRangeOperatorTs) > 0)
static bool _process_in_fully_aligned_blocks_of(FnT fn, BitRangeOperatorTs&... ops) {
   constexpr size_type block_bytes = sizeof(BlockT);
   constexpr size_type block_bits = block_bytes * 8;
   const size_type blocks = detail::first(ops...).bits_to_read() / block_bits;

   using callback_trait = blockwise_processing_callback_trait<FnT, BitRangeOperatorTs...>;
   const auto result = callback_trait::apply_on_full_blocks(fn, ops.template span<BlockT>(blocks)...);
   (ops.advance(block_bytes * blocks), ...);
   return result;
}
template <std::unsigned_integral BlockT, typename FnT, typename... BitRangeOperatorTs>
   requires(detail::blockwise_processing_callback<FnT, BitRangeOperatorTs...>)
static bool _process_in_unaligned_blocks_of(FnT fn, BitRangeOperatorTs&... ops) {
   using callback_trait = blockwise_processing_callback_trait<FnT, BitRangeOperatorTs...>;
   return callback_trait::template apply_on_unaligned_blocks<BlockT>(fn, ops...);
}
template <typename FnT, typename... BitRangeOperatorTs>
   requires(detail::blockwise_processing_callback<FnT, BitRangeOperatorTs...> &&
            sizeof...(BitRangeOperatorTs) > 0)
static bool range_operation(FnT fn, BitRangeOperatorTs... ops) {
   BOTAN_ASSERT(has_equal_lengths(ops...), "all BitRangeOperators have the same length");

   if constexpr((BitRangeOperatorTs::is_byte_aligned() && ...)) {
      // reinterpret the underlying bytes as wide blocks only if every
      // operator points to suitably aligned memory
      const bool alignment = (ops.template is_memory_aligned_to<uint64_t>() && ...);
      if(alignment) {
         return _process_in_fully_aligned_blocks_of<uint64_t>(fn, ops...) &&
                _process_in_fully_aligned_blocks_of<uint32_t>(fn, ops...) &&
                _process_in_fully_aligned_blocks_of<uint16_t>(fn, ops...) &&
                _process_in_unaligned_blocks_of<uint8_t>(fn, ops...);
      }
   }

   return _process_in_unaligned_blocks_of<uint64_t>(fn, ops...) &&
          _process_in_unaligned_blocks_of<uint32_t>(fn, ops...) &&
          _process_in_unaligned_blocks_of<uint16_t>(fn, ops...) &&
          _process_in_unaligned_blocks_of<uint8_t>(fn, ops...);
}
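// Width cascade, by example: for a 1000-bit byte- and memory-aligned range
// the uint64_t pass consumes 15 blocks (960 bits) and advances the
// operators, the uint32_t pass consumes one more (32 bits), the uint16_t
// pass nothing, and the remaining 8 bits are left to the byte-wise
// unaligned pass and its tail handling. Each narrower pass only ever sees
// what the wider passes left behind.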
template <typename FnT, typename... BitvectorTs>
   requires(detail::blockwise_processing_callback<FnT, BitvectorTs...> &&
            (is_bitvector_v<std::remove_cvref_t<BitvectorTs>> && ... && true))
static bool full_range_operation(FnT&& fn, BitvectorTs&... bitvecs) {
   BOTAN_ASSERT(has_equal_lengths(bitvecs...), "all bitvectors have the same length");
   return range_operation(std::forward<FnT>(fn), BitRangeOperator<BitvectorTs>(bitvecs)...);
}
template <typename SomeT, typename... SomeTs>
static bool has_equal_lengths(const SomeT& v, const SomeTs&... vs) {
   return ((v.size() == vs.size()) && ... && true);
}
template <std::unsigned_integral T>
static constexpr T make_mask(size_type bits) {
   const bool max = bits >= sizeof(T) * 8;
   const size_type shift = max ? 0 : bits;  // clamp: shifting by >= the bit width would be UB
   return (T(!max) << shift) - 1;
}
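// Examples: make_mask<uint8_t>(3) == 0b0000'0111; for bits >= 8 the clamp
// kicks in and T(0) - 1 wraps to the all-ones 0xFF.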
auto as_byte_span() { return std::span{m_blocks.data(), m_blocks.size() * sizeof(block_type)}; }

auto as_byte_span() const { return std::span{m_blocks.data(), m_blocks.size() * sizeof(block_type)}; }

std::vector<block_type, allocator_type> m_blocks;