Botan 3.9.0
Crypto and TLS for C++
bitvector.h
Go to the documentation of this file.
1/*
2 * An abstraction for an arbitrarily large bitvector that can
3 * optionally use the secure_allocator. All bitwise accesses and all
4 * constructors are implemented in constant time. Otherwise, only methods
5 * with the "ct_" pre-fix run in constant time.
6 *
7 * (C) 2023-2024 Jack Lloyd
8 * (C) 2023-2024 René Meusel, Rohde & Schwarz Cybersecurity
9 *
10 * Botan is released under the Simplified BSD License (see license.txt)
11 */
12
13#ifndef BOTAN_BIT_VECTOR_H_
14#define BOTAN_BIT_VECTOR_H_
15
16#include <botan/concepts.h>
17#include <botan/exceptn.h>
18#include <botan/mem_ops.h>
19#include <botan/secmem.h>
20#include <botan/strong_type.h>
21#include <botan/internal/bit_ops.h>
22#include <botan/internal/ct_utils.h>
23#include <botan/internal/loadstor.h>
24#include <botan/internal/stl_util.h>
25
26#include <memory>
27#include <optional>
28#include <span>
29#include <sstream>
30#include <string>
31#include <utility>
32#include <vector>
33
34namespace Botan {
35
36template <template <typename> typename AllocatorT>
37class bitvector_base;
38
// Type trait: true only for instantiations of bitvector_base, regardless of
// the allocator template used.
template <typename T>
struct is_bitvector : std::false_type {};

template <template <typename> typename T>
struct is_bitvector<bitvector_base<T>> : std::true_type {};

template <typename T>
constexpr static bool is_bitvector_v = is_bitvector<T>::value;

// Satisfied by bitvector_base instantiations and by strong types that wrap one
// (the wrapped type is inspected via strong_type_wrapped_type).
template <typename T>
concept bitvectorish = is_bitvector_v<strong_type_wrapped_type<T>>;
50
51namespace detail {
52
// Helper exposing the first type of a parameter pack as ::type.
template <typename T0, typename... Ts>
struct first_type {
      using type = T0;
};
57
58// get the first type from a parameter pack
59// TODO: C++26 will bring Parameter Pack indexing:
60// using first_t = Ts...[0];
61template <typename... Ts>
62 requires(sizeof...(Ts) > 0)
64
65// get the first object from a parameter pack
66// TODO: C++26 will bring Parameter Pack indexing:
67// auto first = s...[0];
68template <typename T0, typename... Ts>
69constexpr static first_t<T0, Ts...> first(T0&& t, Ts&&... /*rest*/) {
70 return std::forward<T0>(t);
71}
72
// Maps any type to OutT; used to expand a parameter pack into a repetition of
// the same (block) type below.
template <typename OutT, typename>
using as = OutT;

// Result type of invoking FnT with one BlockT argument per entry in ParamTs.
template <typename FnT, std::unsigned_integral BlockT, typename... ParamTs>
using blockwise_processing_callback_return_type = std::invoke_result_t<FnT, as<BlockT, ParamTs>...>;
78
79template <typename FnT, typename BlockT, typename... ParamTs>
81 std::unsigned_integral<BlockT> &&
82 (std::same_as<BlockT, blockwise_processing_callback_return_type<FnT, BlockT, ParamTs...>> ||
83 std::same_as<bool, blockwise_processing_callback_return_type<FnT, BlockT, ParamTs...>> ||
84 std::same_as<void, blockwise_processing_callback_return_type<FnT, BlockT, ParamTs...>>);
85
86template <typename FnT, typename... ParamTs>
88 is_blockwise_processing_callback_return_type<FnT, uint8_t, ParamTs...> &&
89 is_blockwise_processing_callback_return_type<FnT, uint16_t, ParamTs...> &&
90 is_blockwise_processing_callback_return_type<FnT, uint32_t, ParamTs...> &&
91 is_blockwise_processing_callback_return_type<FnT, uint64_t, ParamTs...>;
92
93template <typename FnT, typename... ParamTs>
95 is_blockwise_processing_callback_return_type<FnT, uint8_t, ParamTs..., uint8_t /* mask */> &&
96 is_blockwise_processing_callback_return_type<FnT, uint16_t, ParamTs..., uint16_t /* mask */> &&
97 is_blockwise_processing_callback_return_type<FnT, uint32_t, ParamTs..., uint32_t /* mask */> &&
98 is_blockwise_processing_callback_return_type<FnT, uint64_t, ParamTs..., uint64_t /* mask */>;
99
100/**
101 * Defines the callback constraints for the BitRangeOperator. For further
102 * details, see bitvector_base::range_operation().
103 */
104template <typename FnT, typename... ParamTs>
107
108template <typename FnT, typename... ParamTs>
111 std::same_as<uint32_t, blockwise_processing_callback_return_type<FnT, uint32_t, ParamTs...>>) ||
113 std::same_as<uint32_t, blockwise_processing_callback_return_type<FnT, uint32_t, ParamTs..., first_t<ParamTs...>>>);
114
115template <typename FnT, typename... ParamTs>
118 std::same_as<bool, blockwise_processing_callback_return_type<FnT, uint32_t, ParamTs...>>) ||
120 std::same_as<bool, blockwise_processing_callback_return_type<FnT, uint32_t, ParamTs..., first_t<ParamTs...>>>);
121
122template <typename T>
124 private:
125 using size_type = typename T::size_type;
126
127 public:
128 using difference_type = std::make_signed_t<size_type>;
129 using value_type = std::remove_const_t<decltype(std::declval<T>().at(0))>;
132
133 // TODO: technically, this could be a random access iterator
134 using iterator_category = std::bidirectional_iterator_tag;
135
136 public:
139
140 bitvector_iterator(T* bitvector, size_t offset) : m_bitvector(bitvector) { update(offset); }
141
142 bitvector_iterator(const bitvector_iterator& other) noexcept : m_bitvector(other.m_bitvector) {
143 update(other.m_offset);
144 }
145
146 bitvector_iterator(bitvector_iterator&& other) noexcept : m_bitvector(other.m_bitvector) {
147 update(other.m_offset);
148 }
149
151 if(this != &other) {
152 m_bitvector = other.m_bitvector;
153 update(other.m_offset);
154 }
155 return *this;
156 }
157
159 m_bitvector = other.m_bitvector;
160 update(other.m_offset);
161 return *this;
162 }
163
165 update(signed_offset() + 1);
166 return *this;
167 }
168
170 auto copy = *this;
171 update(signed_offset() + 1);
172 return copy;
173 }
174
176 update(signed_offset() - 1);
177 return *this;
178 }
179
181 auto copy = *this;
182 update(signed_offset() - 1);
183 return copy;
184 }
185
      // Iterators into different bitvector objects are unordered with respect
      // to each other; iterators into the same vector compare by bit offset.
      std::partial_ordering operator<=>(const bitvector_iterator& other) const noexcept {
         if(m_bitvector == other.m_bitvector) {
            return m_offset <=> other.m_offset;
         } else {
            return std::partial_ordering::unordered;
         }
      }

      // Equal iff both refer to the same bitvector object and the same bit.
      bool operator==(const bitvector_iterator& other) const noexcept {
         return m_bitvector == other.m_bitvector && m_offset == other.m_offset;
      }
197
198 reference operator*() const { return m_bitref.value(); }
199
200 pointer operator->() const { return &(m_bitref.value()); }
201
202 private:
      // Re-seats the cached bit reference after the offset changed. For the
      // one-past-the-end position the cache is emptied, so dereferencing an
      // end() iterator throws (std::optional::value on an empty optional).
      void update(size_type new_offset) {
         m_offset = new_offset;
         if(m_offset < m_bitvector->size()) {
            m_bitref.emplace((*m_bitvector)[m_offset]);
         } else {
            // end() iterator
            m_bitref.reset();
         }
      }

      difference_type signed_offset() const { return static_cast<difference_type>(m_offset); }
214
215 private:
216 T* m_bitvector;
217 size_type m_offset;
218 mutable std::optional<value_type> m_bitref;
219};
220
221} // namespace detail
222
223/**
224 * An arbitrarily large bitvector with typical bit manipulation and convenient
225 * bitwise access methods. Don't use `bitvector_base` directly, but the type
226 * aliases::
227 *
228 * * bitvector - with a standard allocator
229 * * secure_bitvector - with a secure allocator that auto-scrubs the memory
230 */
231template <template <typename> typename AllocatorT>
232class bitvector_base final {
233 public:
234 using block_type = uint8_t;
235 using size_type = size_t;
236 using allocator_type = AllocatorT<block_type>;
240
241 static constexpr size_type block_size_bytes = sizeof(block_type);
243 static constexpr bool uses_secure_allocator = std::is_same_v<allocator_type, secure_allocator<block_type>>;
244
245 private:
246 template <template <typename> typename FriendAllocatorT>
247 friend class bitvector_base;
248
249 static constexpr block_type one = block_type(1);
250
251 static constexpr size_type block_offset_shift = size_type(3) + ceil_log2(block_size_bytes);
252 static constexpr size_type block_index_mask = (one << block_offset_shift) - 1;
253
254 static constexpr size_type block_index(size_type pos) { return pos >> block_offset_shift; }
255
256 static constexpr size_type block_offset(size_type pos) { return pos & block_index_mask; }
257
258 private:
259 /**
260 * Internal helper to wrap a single bit in the bitvector and provide
261 * certain convenience access methods.
262 */
      template <typename BlockT>
         requires std::same_as<block_type, std::remove_cv_t<BlockT>>
      class bitref_base {
         private:
            friend class bitvector_base<AllocatorT>;

            // Binds a reference to the containing block and precomputes the
            // single-bit mask for the addressed bit position.
            constexpr bitref_base(std::span<BlockT> blocks, size_type pos) noexcept :
                  m_block(blocks[block_index(pos)]), m_mask(one << block_offset(pos)) {}

         public:
            bitref_base() = delete;
            bitref_base(const bitref_base&) noexcept = default;
            bitref_base(bitref_base&&) noexcept = default;
            // Assignment is deleted here; the mutable subclass defines it to
            // assign the referenced bit's value instead of rebinding.
            bitref_base& operator=(const bitref_base&) = delete;
            bitref_base& operator=(bitref_base&&) = delete;

            ~bitref_base() = default;

         public:
            // NOLINTNEXTLINE(*-explicit-conversions) FIXME
            constexpr operator bool() const noexcept { return is_set(); }

            constexpr bool is_set() const noexcept { return (m_block & m_mask) > 0; }

            // The bit's value as 0 or 1 in the requested unsigned type.
            template <std::unsigned_integral T>
            constexpr T as() const noexcept {
               return static_cast<T>(is_set());
            }

            // Constant-time Choice representation of the bit's value.
            constexpr CT::Choice as_choice() const noexcept {
               return CT::Choice::from_int(static_cast<BlockT>(m_block & m_mask));
            }

         protected:
            BlockT& m_block;  // NOLINT(*-non-private-member-variable*)
            BlockT m_mask;    // NOLINT(*-non-private-member-variable*)
      };
300
301 public:
302 /**
303 * Wraps a constant reference into the bitvector. Bit can be accessed
304 * but not modified.
305 */
      template <typename BlockT>
      class bitref final : public bitref_base<BlockT> {
         public:
            // Read-only proxy: only the inherited query/conversion methods.
            using bitref_base<BlockT>::bitref_base;
      };
311
312 /**
313 * Wraps a modifiable reference into the bitvector. Bit may be accessed
314 * and modified (e.g. flipped or XOR'ed).
315 *
316 * Constant-time operations are used for the bit manipulations. The
317 * location of the bit in the bit vector may be leaked, though.
318 */
      template <typename BlockT>
         requires(!std::is_const_v<BlockT>)
      class bitref<BlockT> : public bitref_base<BlockT> {
         public:
            using bitref_base<BlockT>::bitref_base;

            ~bitref() = default;
            bitref(const bitref&) noexcept = default;
            bitref(bitref&&) noexcept = default;

            // Sets the referenced bit to 1.
            constexpr bitref& set() noexcept {
               this->m_block |= this->m_mask;
               return *this;
            }

            // Sets the referenced bit to 0.
            constexpr bitref& unset() noexcept {
               this->m_block &= ~this->m_mask;
               return *this;
            }

            // Toggles the referenced bit.
            constexpr bitref& flip() noexcept {
               this->m_block ^= this->m_mask;
               return *this;
            }

            // NOLINTBEGIN

            // Assigns @p bit without branching on its value: a CT mask selects
            // between "block with bit set" and "block with bit cleared".
            constexpr bitref& operator=(bool bit) noexcept {
               this->m_block =
                  CT::Mask<BlockT>::expand(bit).select(this->m_mask | this->m_block, this->m_block & ~this->m_mask);
               return *this;
            }

            // Copy/move assignment copies the referenced bit's *value*; it
            // does not rebind the proxy to another bit.
            constexpr bitref& operator=(const bitref& bit) noexcept { return *this = bit.is_set(); }

            constexpr bitref& operator=(bitref&& bit) noexcept { return *this = bit.is_set(); }

            // NOLINTEND

            // AND with a boolean, implemented via CT masks (no branch on @p other).
            constexpr bitref& operator&=(bool other) noexcept {
               this->m_block &= ~CT::Mask<BlockT>::expand(other).if_not_set_return(this->m_mask);
               return *this;
            }

            // OR with a boolean, implemented via CT masks (no branch on @p other).
            constexpr bitref& operator|=(bool other) noexcept {
               this->m_block |= CT::Mask<BlockT>::expand(other).if_set_return(this->m_mask);
               return *this;
            }

            // XOR with a boolean, implemented via CT masks (no branch on @p other).
            constexpr bitref& operator^=(bool other) noexcept {
               this->m_block ^= CT::Mask<BlockT>::expand(other).if_set_return(this->m_mask);
               return *this;
            }
      };
373
374 public:
      // Creates an empty bitvector (zero bits).
      bitvector_base() : m_bits(0) {}

      // Creates a zero-initialized bitvector holding @p bits bits.
      explicit bitvector_base(size_type bits) : m_bits(bits), m_blocks(ceil_toblocks(bits)) {}
378
379 /**
380 * Initialize the bitvector from a byte-array. Bits are taken byte-wise
381 * from least significant to most significant. Example::
382 *
383 * bitvector[0] -> LSB(Byte[0])
384 * bitvector[1] -> LSB+1(Byte[0])
385 * ...
386 * bitvector[8] -> LSB(Byte[1])
387 *
388 * @param bytes The byte vector to be loaded
389 * @param bits The number of bits to be loaded. This must not be more
390 * than the number of bytes in @p bytes.
391 */
      bitvector_base(std::span<const uint8_t> bytes, /* NOLINT(*-explicit-conversions) FIXME */
                     std::optional<size_type> bits = std::nullopt) :
            m_bits() {
         // Validation and bit extraction are delegated to from_bytes().
         from_bytes(bytes, bits);
      }

      // Initialize from raw blocks; by default every bit of every block is used.
      bitvector_base(std::initializer_list<block_type> blocks, std::optional<size_type> bits = std::nullopt) :
            m_bits(bits.value_or(blocks.size() * block_size_bits)), m_blocks(blocks.begin(), blocks.end()) {}
400
      // True iff the bitvector holds zero bits.
      bool empty() const { return m_bits == 0; }

      // Number of bits currently held.
      size_type size() const { return m_bits; }
404
405 /**
406 * @returns true iff the number of 1-bits in this is odd, false otherwise (constant time)
407 */
409 uint64_t acc = 0;
410 full_range_operation([&](std::unsigned_integral auto block) { acc ^= block; }, *this);
411
412 for(size_t i = (sizeof(acc) * 8) >> 1; i > 0; i >>= 1) {
413 acc ^= acc >> i;
414 }
415
416 return CT::Choice::from_int(acc & one);
417 }
418
419 /**
420 * Counts the number of 1-bits in the bitvector in constant time.
421 * @returns the "population count" (or hamming weight) of the bitvector
422 */
424 size_type acc = 0;
425 full_range_operation([&](std::unsigned_integral auto block) { acc += ct_popcount(block); }, *this);
426 return acc;
427 }
428
429 /**
430 * @returns copies this bitvector into a new bitvector of type @p OutT
431 */
      template <bitvectorish OutT>
      OutT as() const {
         // Copies the full bit range into a freshly allocated OutT.
         return subvector<OutT>(0, size());
      }
436
437 /**
438 * @returns true if @p other contains the same bit pattern as this
439 */
      template <bitvectorish OtherT>
      bool equals_vartime(const OtherT& other) const noexcept {
         // Short-circuits on size mismatch and (within full_range_operation)
         // may exit early on a differing block — hence "vartime".
         return size() == other.size() &&
                full_range_operation([]<std::unsigned_integral BlockT>(BlockT lhs, BlockT rhs) { return lhs == rhs; },
                                     *this,
                                     unwrap_strong_type(other));
      }

      /**
       * @returns true if @p other contains the same bit pattern as this
       */
      template <bitvectorish OtherT>
      bool equals(const OtherT& other) const noexcept {
         // XOR produces the difference pattern; none() evaluates it in
         // constant time.
         return (*this ^ other).none();
      }
455
456 /// @name Serialization
457 /// @{
458
459 /**
460 * Re-initialize the bitvector with the given bytes. See the respective
461 * constructor for details. This should be used only when trying to save
462 * allocations. Otherwise, use the constructor.
463 *
464 * @param bytes the byte range to load bits from
465 * @param bits (optional) if not all @p bytes should be loaded in full
466 */
      void from_bytes(std::span<const uint8_t> bytes, std::optional<size_type> bits = std::nullopt) {
         m_bits = bits.value_or(bytes.size_bytes() * 8);
         BOTAN_ARG_CHECK(m_bits <= bytes.size_bytes() * 8, "not enough data to load so many bits");
         resize(m_bits);

         // load as much aligned data as possible
         const auto verbatim_blocks = m_bits / block_size_bits;
         const auto verbatim_bytes = verbatim_blocks * block_size_bytes;
         if(verbatim_blocks > 0) {
            typecast_copy(std::span{m_blocks}.first(verbatim_blocks), bytes.first(verbatim_bytes));
         }

         // load remaining unaligned data bit-by-bit (LSB-first within a byte)
         for(size_type i = verbatim_bytes * 8; i < m_bits; ++i) {
            ref(i) = ((bytes[i >> 3] & (uint8_t(1) << (i & 7))) != 0);
         }
      }
484
485 /**
486 * Renders the bitvector into a byte array. By default, this will use
487 * `std::vector<uint8_t>` or `Botan::secure_vector<uint8_t>`, depending on
488 * the allocator used by the bitvector. The rendering is compatible with
489 * the bit layout explained in the respective constructor.
490 */
      template <concepts::resizable_byte_buffer OutT =
                   std::conditional_t<uses_secure_allocator, secure_vector<uint8_t>, std::vector<uint8_t>>>
      OutT to_bytes() const {
         // Allocates the minimally sized buffer, then delegates to the
         // span-based overload for the actual rendering.
         OutT out(ceil_tobytes(m_bits));
         to_bytes(out);
         return out;
      }
498
499 /**
500 * Renders the bitvector into a properly sized byte range.
501 *
502 * @param out a byte range that has a length of at least `ceil_tobytes(size())`.
503 */
      void to_bytes(std::span<uint8_t> out) const {
         const auto bytes_needed = ceil_tobytes(m_bits);
         BOTAN_ARG_CHECK(bytes_needed <= out.size_bytes(), "Not enough space to render bitvector");

         // copy as much aligned data as possible
         const auto verbatim_blocks = m_bits / block_size_bits;
         const auto verbatim_bytes = verbatim_blocks * block_size_bytes;
         if(verbatim_blocks > 0) {
            typecast_copy(out.first(verbatim_bytes), std::span{m_blocks}.first(verbatim_blocks));
         }

         // copy remaining unaligned data; zero the tail first so the unused
         // high-order bits of the final byte come out as 0
         clear_mem(out.subspan(verbatim_bytes));
         for(size_type i = verbatim_bytes * 8; i < m_bits; ++i) {
            out[i >> 3] |= ref(i).template as<uint8_t>() << (i & 7);
         }
      }
521
522 /**
523 * Renders this bitvector into a sequence of "0"s and "1"s.
524 * This is meant for debugging purposes and is not efficient.
525 */
526 std::string to_string() const {
527 std::stringstream ss;
528 for(size_type i = 0; i < size(); ++i) {
529 ss << ref(i);
530 }
531 return ss.str();
532 }
533
534 /// @}
535
536 /// @name Capacity Accessors and Modifiers
537 /// @{
538
      // Number of bits that fit without reallocating the block storage.
      size_type capacity() const { return m_blocks.capacity() * block_size_bits; }

      // Pre-allocates block storage for at least @p bits bits.
      void reserve(size_type bits) { m_blocks.reserve(ceil_toblocks(bits)); }
542
543 void resize(size_type bits) {
544 const auto new_number_of_blocks = ceil_toblocks(bits);
545 if(new_number_of_blocks != m_blocks.size()) {
546 m_blocks.resize(new_number_of_blocks);
547 }
548
549 m_bits = bits;
550 zero_unused_bits();
551 }
552
      // Appends a single bit at the end.
      void push_back(bool bit) {
         const auto i = size();
         resize(i + 1);
         ref(i) = bit;
      }

      // Removes the last bit; no-op on an empty bitvector.
      void pop_back() {
         if(!empty()) {
            resize(size() - 1);
         }
      }
564
565 /// @}
566
567 /// @name Bitwise and Global Accessors and Modifiers
568 /// @{
569
      // Bounds-checked bit access; throws Invalid_Argument when out of range.
      auto at(size_type pos) {
         check_offset(pos);
         return ref(pos);
      }

      // TODO C++23: deducing this
      auto at(size_type pos) const {
         check_offset(pos);
         return ref(pos);
      }

      // Unchecked access to the first bit (invalid on an empty bitvector).
      auto front() { return ref(0); }

      // TODO C++23: deducing this
      auto front() const { return ref(0); }

      // Unchecked access to the last bit (invalid on an empty bitvector).
      auto back() { return ref(size() - 1); }

      // TODO C++23: deducing this
      auto back() const { return ref(size() - 1); }
590
591 /**
592 * Sets the bit at position @p pos.
593 * @throws Botan::Invalid_Argument if @p pos is out of range
594 */
596 check_offset(pos);
597 ref(pos).set();
598 return *this;
599 }
600
601 /**
602 * Sets all currently allocated bits.
603 */
605 full_range_operation(
606 [](std::unsigned_integral auto block) -> decltype(block) {
607 return static_cast<decltype(block)>(~static_cast<decltype(block)>(0));
608 },
609 *this);
610 zero_unused_bits();
611 return *this;
612 }
613
614 /**
615 * Unsets the bit at position @p pos.
616 * @throws Botan::Invalid_Argument if @p pos is out of range
617 */
619 check_offset(pos);
620 ref(pos).unset();
621 return *this;
622 }
623
624 /**
625 * Unsets all currently allocated bits.
626 */
628 full_range_operation(
629 [](std::unsigned_integral auto block) -> decltype(block) { return static_cast<decltype(block)>(0); },
630 *this);
631 return *this;
632 }
633
634 /**
635 * Flips the bit at position @p pos.
636 * @throws Botan::Invalid_Argument if @p pos is out of range
637 */
639 check_offset(pos);
640 ref(pos).flip();
641 return *this;
642 }
643
644 /**
645 * Flips all currently allocated bits.
646 */
648 full_range_operation([](std::unsigned_integral auto block) -> decltype(block) { return ~block; }, *this);
649 zero_unused_bits();
650 return *this;
651 }
652
653 /**
654 * @returns true iff no bit is set
655 */
      /**
       * @returns true iff no bit is set
       */
      bool none_vartime() const {
         // May exit early on the first non-zero block (leaking its position).
         return full_range_operation([](std::unsigned_integral auto block) { return block == 0; }, *this);
      }

      /**
       * @returns true iff no bit is set in constant time
       */
      bool none() const { return hamming_weight() == 0; }

      /**
       * @returns true iff at least one bit is set
       */
      bool any_vartime() const { return !none_vartime(); }

      /**
       * @returns true iff at least one bit is set in constant time
       */
      bool any() const { return !none(); }

      /**
       * @returns true iff all bits are set
       */
      bool all_vartime() const {
         // The mask argument marks the valid bits of the (possibly partial)
         // final block, so unused padding bits do not affect the result.
         return full_range_operation(
            []<std::unsigned_integral BlockT>(BlockT block, BlockT mask) { return block == mask; }, *this);
      }

      /**
       * @returns true iff all bits are set in constant time
       */
      bool all() const { return hamming_weight() == m_bits; }
687
      // Unchecked bit access; use at() for bounds checking.
      auto operator[](size_type pos) { return ref(pos); }

      // TODO C++23: deducing this
      auto operator[](size_type pos) const { return ref(pos); }
692
693 /// @}
694
695 /// @name Subvectors
696 /// @{
697
698 /**
699 * Creates a new bitvector with a subsection of this bitvector starting at
700 * @p pos copying exactly @p length bits.
701 */
      template <bitvectorish OutT = bitvector_base<AllocatorT>>
      auto subvector(size_type pos, std::optional<size_type> length = std::nullopt) const {
         // Default length: everything from @p pos to the end.
         size_type bitlen = length.value_or(size() - pos);
         BOTAN_ARG_CHECK(pos + bitlen <= size(), "Not enough bits to copy");

         OutT newvector(bitlen);

         // Handle bitvectors that are wrapped in strong types
         auto& newvector_unwrapped = unwrap_strong_type(newvector);

         if(bitlen > 0) {
            if(pos % 8 == 0) {
               // Byte-aligned start: a straight block copy suffices.
               copy_mem(
                  newvector_unwrapped.m_blocks,
                  std::span{m_blocks}.subspan(block_index(pos), block_index(pos + bitlen - 1) - block_index(pos) + 1));
            } else {
               // Unaligned start: shift bits into place via the range operators.
               BitRangeOperator<const bitvector_base<AllocatorT>, BitRangeAlignment::no_alignment> from_op(
                  *this, pos, bitlen);
               BitRangeOperator<strong_type_wrapped_type<OutT>> to_op(
                  unwrap_strong_type(newvector_unwrapped), 0, bitlen);
               range_operation([](auto /* to */, auto from) { return from; }, to_op, from_op);
            }

            // Clear stray bits that the copy may have left in the final block.
            newvector_unwrapped.zero_unused_bits();
         }

         return newvector;
      }
730
731 /**
732 * Extracts a subvector of bits as an unsigned integral type @p OutT
733 * starting from bit @p pos and copying exactly sizeof(OutT)*8 bits.
734 *
735 * Hint: The bits are in big-endian order, i.e. the least significant bit
736 * is the 0th bit and the most significant bit it the n-th. Hence,
737 * addressing the bits with bitwise operations is done like so:
738 * bool bit = (out_int >> pos) & 1;
739 */
      template <typename OutT>
         requires(std::unsigned_integral<strong_type_wrapped_type<OutT>> &&
                  !std::same_as<bool, strong_type_wrapped_type<OutT>>)
      OutT subvector(size_type pos) const {
         using result_t = strong_type_wrapped_type<OutT>;
         constexpr size_t bits = sizeof(result_t) * 8;
         BOTAN_ARG_CHECK(pos + bits <= size(), "Not enough bits to copy");
         result_t out = 0;

         if(pos % 8 == 0) {
            // Byte-aligned: load the integer straight out of the block storage.
            out = load_le<result_t>(std::span{m_blocks}.subspan(block_index(pos)).template first<sizeof(result_t)>());
         } else {
            // Unaligned: the range operator assembles shifted blocks; the
            // callback matches exactly once, with a block of result_t's width.
            BitRangeOperator<const bitvector_base<AllocatorT>, BitRangeAlignment::no_alignment> op(*this, pos, bits);
            range_operation(
               [&](std::unsigned_integral auto integer) {
                  if constexpr(std::same_as<result_t, decltype(integer)>) {
                     out = integer;
                  }
               },
               op);
         }

         return wrap_strong_type<OutT>(out);
      }
764
765 /**
766 * Replaces a subvector of bits with the bits of another bitvector @p value
767 * starting at bit @p pos. The number of bits to replace is determined by
768 * the size of @p value.
769 *
770 * @note This is currently supported for byte-aligned @p pos only.
771 *
772 * @throws Not_Implemented when called with @p pos not divisible by 8.
773 *
774 * @param pos the position to start replacing bits
775 * @param value the bitvector to copy bits from
776 */
777 template <typename InT>
778 requires(std::unsigned_integral<strong_type_wrapped_type<InT>> && !std::same_as<bool, InT>)
779 void subvector_replace(size_type pos, InT value) {
781 constexpr size_t bits = sizeof(in_t) * 8;
782 BOTAN_ARG_CHECK(pos + bits <= size(), "Not enough bits to replace");
783
784 if(pos % 8 == 0) {
785 store_le(std::span{m_blocks}.subspan(block_index(pos)).template first<sizeof(in_t)>(),
786 unwrap_strong_type(value));
787 } else {
788 BitRangeOperator<bitvector_base<AllocatorT>, BitRangeAlignment::no_alignment> op(*this, pos, bits);
789 range_operation(
790 [&]<std::unsigned_integral BlockT>(BlockT block) -> BlockT {
791 if constexpr(std::same_as<in_t, BlockT>) {
792 return unwrap_strong_type(value);
793 } else {
794 // This should never be reached. BOTAN_ASSERT_UNREACHABLE()
795 // caused warning "unreachable code" on MSVC, though. You
796 // don't say!
797 //
798 // Returning the given block back, is the most reasonable
799 // thing to do in this case, though.
800 return block;
801 }
802 },
803 op);
804 }
805 }
806
807 /// @}
808
809 /// @name Operators
810 ///
811 /// @{
812
813 auto operator~() {
814 auto newbv = *this;
815 newbv.flip();
816 return newbv;
817 }
818
      // In-place bitwise OR with @p other, applied block-wise.
      template <bitvectorish OtherT>
      auto& operator|=(const OtherT& other) {
         full_range_operation([]<std::unsigned_integral BlockT>(BlockT lhs, BlockT rhs) -> BlockT { return lhs | rhs; },
                              *this,
                              unwrap_strong_type(other));
         return *this;
      }

      // In-place bitwise AND with @p other, applied block-wise.
      template <bitvectorish OtherT>
      auto& operator&=(const OtherT& other) {
         full_range_operation([]<std::unsigned_integral BlockT>(BlockT lhs, BlockT rhs) -> BlockT { return lhs & rhs; },
                              *this,
                              unwrap_strong_type(other));
         return *this;
      }

      // In-place bitwise XOR with @p other, applied block-wise.
      template <bitvectorish OtherT>
      auto& operator^=(const OtherT& other) {
         full_range_operation([]<std::unsigned_integral BlockT>(BlockT lhs, BlockT rhs) -> BlockT { return lhs ^ rhs; },
                              *this,
                              unwrap_strong_type(other));
         return *this;
      }
842
843 /// @}
844
845 /// @name Constant Time Operations
846 ///
847 /// @{
848
849 /**
850 * Implements::
851 *
852 * if(condition) {
853 * *this ^= other;
854 * }
855 *
856 * omitting runtime dependence on any of the parameters.
857 */
      template <bitvectorish OtherT>
      void ct_conditional_xor(CT::Choice condition, const OtherT& other) {
         BOTAN_ASSERT_NOMSG(m_bits == other.m_bits);
         BOTAN_ASSERT_NOMSG(m_blocks.size() == other.m_blocks.size());

         // One overload per block width, each capturing a CT mask derived from
         // @p condition: XORs rhs in when the mask is set, otherwise a no-op —
         // without ever branching on the condition.
         auto maybe_xor = overloaded{
            [m = CT::Mask<uint64_t>::from_choice(condition)](uint64_t lhs, uint64_t rhs) -> uint64_t {
               return lhs ^ m.if_set_return(rhs);
            },
            [m = CT::Mask<uint32_t>::from_choice(condition)](uint32_t lhs, uint32_t rhs) -> uint32_t {
               return lhs ^ m.if_set_return(rhs);
            },
            [m = CT::Mask<uint16_t>::from_choice(condition)](uint16_t lhs, uint16_t rhs) -> uint16_t {
               return lhs ^ m.if_set_return(rhs);
            },
            [m = CT::Mask<uint8_t>::from_choice(condition)](uint8_t lhs, uint8_t rhs) -> uint8_t {
               return lhs ^ m.if_set_return(rhs);
            },
         };

         full_range_operation(maybe_xor, *this, unwrap_strong_type(other));
      }
880
      // Marks the block storage as secret for constant-time analysis tooling.
      constexpr void _const_time_poison() const { CT::poison(m_blocks); }

      // Removes the secret marking from the block storage again.
      constexpr void _const_time_unpoison() const { CT::unpoison(m_blocks); }
884
885 /// @}
886
887 /// @name Iterators
888 ///
889 /// @{
890
891 iterator begin() noexcept { return iterator(this, 0); }
892
893 const_iterator begin() const noexcept { return const_iterator(this, 0); }
894
895 const_iterator cbegin() const noexcept { return const_iterator(this, 0); }
896
897 iterator end() noexcept { return iterator(this, size()); }
898
899 const_iterator end() const noexcept { return const_iterator(this, size()); }
900
901 const_iterator cend() noexcept { return const_iterator(this, size()); }
902
903 /// @}
904
905 private:
      // Throws Invalid_Argument unless @p pos is a valid bit index.
      void check_offset(size_type pos) const {
         // BOTAN_ASSERT_NOMSG(!CT::is_poisoned(&m_bits, sizeof(m_bits)));
         // BOTAN_ASSERT_NOMSG(!CT::is_poisoned(&pos, sizeof(pos)));
         BOTAN_ARG_CHECK(pos < m_bits, "Out of range");
      }
911
      // Clears the unused high-order bits of the final (partial) block so that
      // block-wise operations (equality, hamming weight, ...) see them as zero.
      void zero_unused_bits() {
         const auto first_unused_bit = size();

         // Zero out any unused bits in the last block
         if(first_unused_bit % block_size_bits != 0) {
            // Mask keeps bits [0, block_offset(first_unused_bit)) of the block.
            const block_type mask = (one << block_offset(first_unused_bit)) - one;
            m_blocks[block_index(first_unused_bit)] &= mask;
         }
      }
921
922 static constexpr size_type ceil_toblocks(size_type bits) {
923 return (bits + block_size_bits - 1) / block_size_bits;
924 }
925
      // Creates an unchecked read-only or mutable proxy for bit @p pos.
      auto ref(size_type pos) const { return bitref<const block_type>(m_blocks, pos); }

      auto ref(size_type pos) { return bitref<block_type>(m_blocks, pos); }
929
930 private:
      // Whether a BitRangeOperator's range starts at a byte boundary.
      enum class BitRangeAlignment : uint8_t { byte_aligned, no_alignment };
932
933 /**
934 * Helper construction to implement bit range operations on the bitvector.
935 * It basically implements an iterator to read and write blocks of bits
936 * from the underlying bitvector. Where "blocks of bits" are unsigned
937 * integers of varying bit lengths.
938 *
939 * If the iteration starts at a byte boundary in the underlying bitvector,
940 * this applies certain optimizations (i.e. loading blocks of bits straight
941 * from the underlying byte buffer). The optimizations are enabled at
942 * compile time (with the template parameter `alignment`).
943 */
944 template <typename BitvectorT, auto alignment = BitRangeAlignment::byte_aligned>
945 requires is_bitvector_v<std::remove_cvref_t<BitvectorT>>
946 class BitRangeOperator {
         private:
            // True when the underlying bitvector is const (read-only iteration).
            constexpr static bool is_const() { return std::is_const_v<BitvectorT>; }

            // Precomputed shift amounts for ranges that do not start at a byte
            // boundary of the underlying bitvector.
            struct UnalignedDataHelper {
                  const uint8_t padding_bits;
                  const uint8_t bits_to_byte_alignment;
            };
954
         public:
            BitRangeOperator(BitvectorT& source, size_type start_bitoffset, size_type bitlength) :
                  m_source(source),
                  m_start_bitoffset(start_bitoffset),
                  m_bitlength(bitlength),
                  m_unaligned_helper({.padding_bits = static_cast<uint8_t>(start_bitoffset % 8),
                                      .bits_to_byte_alignment = static_cast<uint8_t>(8 - (start_bitoffset % 8))}),
                  m_read_bitpos(start_bitoffset),
                  m_write_bitpos(start_bitoffset) {
               BOTAN_ASSERT(is_byte_aligned() == (m_start_bitoffset % 8 == 0), "byte alignment guarantee");
               BOTAN_ASSERT(m_source.size() >= m_start_bitoffset + m_bitlength, "enough bytes in underlying source");
            }

            // Convenience overload: iterate the entire bitvector.
            explicit BitRangeOperator(BitvectorT& source) : BitRangeOperator(source, 0, source.size()) {}
969
            // Compile-time alignment selector (template parameter `alignment`).
            static constexpr bool is_byte_aligned() { return alignment == BitRangeAlignment::byte_aligned; }

            /**
             * @returns the overall number of bits to be iterated with this operator
             */
            size_type size() const { return m_bitlength; }

            /**
             * @returns the number of bits not yet read from this operator
             */
            size_type bits_to_read() const { return m_bitlength - m_read_bitpos + m_start_bitoffset; }

            /**
             * @returns the number of bits still to be written into this operator
             */
            size_type bits_to_write() const { return m_bitlength - m_write_bitpos + m_start_bitoffset; }
986
987 /**
988 * Loads the next block of bits from the underlying bitvector. No
989 * bounds checks are performed. The caller can define the size of
990 * the resulting unsigned integer block.
991 */
            template <std::unsigned_integral BlockT>
            BlockT load_next() const {
               constexpr size_type block_size = sizeof(BlockT);
               constexpr size_type block_bits = block_size * 8;
               const auto bits_remaining = bits_to_read();

               BlockT result_block = 0;
               if constexpr(is_byte_aligned()) {
                  // Aligned fast path: read the block verbatim (little-endian).
                  result_block = load_le(m_source.as_byte_span().subspan(read_bytepos()).template first<block_size>());
               } else {
                  const size_type byte_pos = read_bytepos();
                  const size_type bits_to_collect = std::min(block_bits, bits_to_read());

                  const uint8_t first_byte = m_source.as_byte_span()[byte_pos];

                  // Initialize the left-most bits from the first byte.
                  result_block = BlockT(first_byte) >> m_unaligned_helper.padding_bits;

                  // If more bits are needed, we pull them from the remaining bytes.
                  if(m_unaligned_helper.bits_to_byte_alignment < bits_to_collect) {
                     const BlockT block =
                        load_le(m_source.as_byte_span().subspan(byte_pos + 1).template first<block_size>());
                     result_block |= block << m_unaligned_helper.bits_to_byte_alignment;
                  }
               }

               // Advance the read cursor; the final chunk may be shorter than
               // a full block.
               m_read_bitpos += std::min(block_bits, bits_remaining);
               return result_block;
            }
1021
      /**
       * Stores the next block of bits into the underlying bitvector.
       * No bounds checks are performed. Storing bit blocks that are not
       * aligned at a byte-boundary in the underlying bitvector is
       * currently not implemented.
       */
      template <std::unsigned_integral BlockT>
         requires(!is_const())
      void store_next(BlockT block) {
         constexpr size_type block_size = sizeof(BlockT);
         constexpr size_type block_bits = block_size * 8;

         if constexpr(is_byte_aligned()) {
            // Byte-aligned ranges can write the block straight into memory.
            auto sink = m_source.as_byte_span().subspan(write_bytepos()).template first<block_size>();
            store_le(sink, block);
         } else {
            const size_type byte_pos = write_bytepos();
            const size_type bits_to_store = std::min(block_bits, bits_to_write());

            uint8_t& first_byte = m_source.as_byte_span()[byte_pos];

            // Set the left-most bits in the first byte, leaving all others unchanged
            first_byte = (first_byte & uint8_t(0xFF >> m_unaligned_helper.bits_to_byte_alignment)) |
                         uint8_t(block << m_unaligned_helper.padding_bits);

            // If more bits are provided, we store them in the remaining bytes.
            if(m_unaligned_helper.bits_to_byte_alignment < bits_to_store) {
               const auto remaining_bytes =
                  m_source.as_byte_span().subspan(byte_pos + 1).template first<block_size>();
               // Preserve the existing bits this store must not touch and
               // merge in the remainder of `block`.
               const BlockT padding_mask = ~(BlockT(-1) >> m_unaligned_helper.bits_to_byte_alignment);
               const BlockT new_bytes =
                  (load_le(remaining_bytes) & padding_mask) | block >> m_unaligned_helper.bits_to_byte_alignment;
               store_le(remaining_bytes, new_bytes);
            }
         }

         m_write_bitpos += std::min(block_bits, bits_to_write());
      }
1060
      /**
       * @returns a mutable, typed view of @p blocks BlockT values starting at
       *          the current read position. Only valid for byte-aligned ranges
       *          whose read position is BlockT-aligned in memory (asserted).
       */
      template <std::unsigned_integral BlockT>
         requires(is_byte_aligned() && !is_const())
      std::span<BlockT> span(size_type blocks) const {
         BOTAN_DEBUG_ASSERT(blocks == 0 || is_memory_aligned_to<BlockT>());
         BOTAN_DEBUG_ASSERT(read_bytepos() % sizeof(BlockT) == 0);
         // Intermittently casting to void* to avoid a compiler warning
         void* ptr = reinterpret_cast<void*>(m_source.as_byte_span().data() + read_bytepos());
         return {reinterpret_cast<BlockT*>(ptr), blocks};
      }

      /**
       * @returns a read-only, typed view of @p blocks BlockT values starting
       *          at the current read position (const-source overload).
       */
      template <std::unsigned_integral BlockT>
         requires(is_byte_aligned() && is_const())
      std::span<const BlockT> span(size_type blocks) const {
         BOTAN_DEBUG_ASSERT(blocks == 0 || is_memory_aligned_to<BlockT>());
         BOTAN_DEBUG_ASSERT(read_bytepos() % sizeof(BlockT) == 0);
         // Intermittently casting to void* to avoid a compiler warning
         const void* ptr = reinterpret_cast<const void*>(m_source.as_byte_span().data() + read_bytepos());
         return {reinterpret_cast<const BlockT*>(ptr), blocks};
      }
1080
1081 void advance(size_type bytes)
1082 requires(is_byte_aligned())
1083 {
1084 m_read_bitpos += bytes * 8;
1085 m_write_bitpos += bytes * 8;
1086 }
1087
      /**
       * Checks whether the current read position points to memory that is
       * suitably aligned for a BlockT access.
       *
       * @returns non-zero if the pointer is already BlockT-aligned
       *          NOTE(review): the expression yields a bool although the
       *          declared return type is size_t — consider declaring it bool;
       *          verify callers (used in boolean contexts only in this file).
       */
      template <std::unsigned_integral BlockT>
         requires(is_byte_aligned())
      size_t is_memory_aligned_to() const {
         const void* cptr = m_source.as_byte_span().data() + read_bytepos();
         const void* ptr_before = cptr;

         // std::align takes `ptr` as a reference (!), i.e. `void*&` and
         // uses it as an out-param. Though, `cptr` is const because this
         // method is const-qualified, hence the const_cast<>.
         void* ptr = const_cast<void*>(cptr);
         size_t size = sizeof(BlockT);
         // std::align leaves `ptr` unchanged only if it was aligned already.
         return ptr_before != nullptr && std::align(alignof(BlockT), size, ptr, size) == ptr_before;
      }
1101
   private:
      /// @returns the byte offset of the read cursor within the byte span
      size_type read_bytepos() const { return m_read_bitpos / 8; }

      /// @returns the byte offset of the write cursor within the byte span
      size_type write_bytepos() const { return m_write_bitpos / 8; }

   private:
      BitvectorT& m_source;         // the bitvector this operator reads/writes
      size_type m_start_bitoffset;  // first bit of the range within m_source
      size_type m_bitlength;        // number of bits covered by this range

      UnalignedDataHelper m_unaligned_helper;  // precomputed shifts for unaligned ranges

      // Cursors are mutable so that const-qualified load_next() can advance.
      mutable size_type m_read_bitpos;
      mutable size_type m_write_bitpos;
   };
1117
   /**
    * Helper struct for the low-level handling of blockwise operations
    *
    * This has two main code paths: Optimized for byte-aligned ranges that
    * can simply be taken from memory as-is. And a generic implementation
    * that must assemble blocks from unaligned bits before processing.
    */
   template <typename FnT, typename... ParamTs>
      requires detail::blockwise_processing_callback<FnT, ParamTs...>
   class blockwise_processing_callback_trait {
      public:
         // Callback expects a mask of the valid bits as an extra argument
         constexpr static bool needs_mask = detail::blockwise_processing_callback_with_mask<FnT, ParamTs...>;
         // Callback returns a block that is stored back into the first range
         constexpr static bool is_manipulator = detail::manipulating_blockwise_processing_callback<FnT, ParamTs...>;
         // Callback returns a bool that may abort the iteration early
         constexpr static bool is_predicate = detail::predicate_blockwise_processing_callback<FnT, ParamTs...>;
         static_assert(!is_manipulator || !is_predicate, "cannot be manipulator and predicate at the same time");

         /**
          * Applies @p fn to the blocks provided in @p blocks by simply reading from
          * memory without re-arranging any bits across byte-boundaries.
          */
         template <std::unsigned_integral... BlockTs>
            requires(all_same_v<std::remove_cv_t<BlockTs>...> && sizeof...(BlockTs) == sizeof...(ParamTs))
         constexpr static bool apply_on_full_blocks(FnT fn, std::span<BlockTs>... blocks) {
            constexpr size_type bits = sizeof(detail::first_t<BlockTs...>) * 8;
            const size_type iterations = detail::first(blocks...).size();
            for(size_type i = 0; i < iterations; ++i) {
               if constexpr(is_predicate) {
                  // Predicate failed: cancel the iteration
                  if(!apply(fn, bits, blocks[i]...)) {
                     return false;
                  }
               } else if constexpr(is_manipulator) {
                  // The callback's result is written back into the first range
                  detail::first(blocks...)[i] = apply(fn, bits, blocks[i]...);
               } else {
                  apply(fn, bits, blocks[i]...);
               }
            }
            return true;
         }

         /**
          * Applies @p fn to as many blocks as @p ops provide for the given type.
          */
         template <std::unsigned_integral BlockT, typename... BitRangeOperatorTs>
            requires(sizeof...(BitRangeOperatorTs) == sizeof...(ParamTs))
         constexpr static bool apply_on_unaligned_blocks(FnT fn, BitRangeOperatorTs&... ops) {
            constexpr size_type block_bits = sizeof(BlockT) * 8;
            auto bits = detail::first(ops...).bits_to_read();
            if(bits == 0) {
               return true;
            }

            bits += block_bits;  // avoid unsigned integer underflow in the following loop
            // Keep going while (after removing the offset) more than
            // block_bits - 8 actual bits remain, i.e. while a BlockT is the
            // right granularity; smaller block types handle the remainder.
            while(bits > block_bits * 2 - 8) {
               bits -= block_bits;
               if constexpr(is_predicate) {
                  if(!apply(fn, bits, ops.template load_next<BlockT>()...)) {
                     return false;
                  }
               } else if constexpr(is_manipulator) {
                  detail::first(ops...).store_next(apply(fn, bits, ops.template load_next<BlockT>()...));
               } else {
                  apply(fn, bits, ops.template load_next<BlockT>()...);
               }
            }
            return true;
         }

      private:
         /// Invokes @p fn on @p blocks, appending a mask of the @p bits valid
         /// bits when the callback's signature asks for one.
         template <std::unsigned_integral... BlockTs>
            requires(all_same_v<BlockTs...>)
         constexpr static auto apply(FnT fn, size_type bits, BlockTs... blocks) {
            if constexpr(needs_mask) {
               return fn(blocks..., make_mask<detail::first_t<BlockTs...>>(bits));
            } else {
               return fn(blocks...);
            }
         }
   };
1196
   /**
    * Helper function of `full_range_operation` and `range_operation` that
    * calls @p fn on a given aligned unsigned integer block as long as the
    * underlying bit range contains enough bits to fill the block fully.
    *
    * This uses bare memory access to gain a speed up for aligned data.
    */
   template <std::unsigned_integral BlockT, typename FnT, typename... BitRangeOperatorTs>
      requires(detail::blockwise_processing_callback<FnT, BitRangeOperatorTs...> &&
               sizeof...(BitRangeOperatorTs) > 0)
   static bool _process_in_fully_aligned_blocks_of(FnT fn, BitRangeOperatorTs&... ops) {
      constexpr size_type block_bytes = sizeof(BlockT);
      constexpr size_type block_bits = block_bytes * 8;
      // All operators have equal lengths (checked by the caller), so the first
      // one determines how many full blocks are available.
      const size_type blocks = detail::first(ops...).bits_to_read() / block_bits;

      using callback_trait = blockwise_processing_callback_trait<FnT, BitRangeOperatorTs...>;
      const auto result = callback_trait::apply_on_full_blocks(fn, ops.template span<BlockT>(blocks)...);
      // Advance every operator past the processed bytes (fold over all ops);
      // this happens unconditionally, even if the callback aborted early.
      (ops.advance(block_bytes * blocks), ...);
      return result;
   }
1217
1218 /**
1219 * Helper function of `full_range_operation` and `range_operation` that
1220 * calls @p fn on a given unsigned integer block size as long as the
1221 * underlying bit range contains enough bits to fill the block.
1222 */
1223 template <std::unsigned_integral BlockT, typename FnT, typename... BitRangeOperatorTs>
1224 requires(detail::blockwise_processing_callback<FnT, BitRangeOperatorTs...>)
1225 static bool _process_in_unaligned_blocks_of(FnT fn, BitRangeOperatorTs&... ops) {
1226 using callback_trait = blockwise_processing_callback_trait<FnT, BitRangeOperatorTs...>;
1227 return callback_trait::template apply_on_unaligned_blocks<BlockT>(fn, ops...);
1228 }
1229
   /**
    * Apply @p fn to all bits in the ranges defined by @p ops. If more than
    * one range operator is passed to @p ops, @p fn receives corresponding
    * blocks of bits from each operator. Therefore, all @p ops have to define
    * the exact same length of their underlying ranges.
    *
    * @p fn may return a bit block that will be stored into the _first_ bit
    * range passed into @p ops. If @p fn returns a boolean, and its value is
    * `false`, the range operation is cancelled and `false` is returned.
    *
    * The implementation ensures to pull bits in the largest bit blocks
    * possible and reverts to smaller bit blocks only when needed.
    */
   template <typename FnT, typename... BitRangeOperatorTs>
      requires(detail::blockwise_processing_callback<FnT, BitRangeOperatorTs...> &&
               sizeof...(BitRangeOperatorTs) > 0)
   static bool range_operation(FnT fn, BitRangeOperatorTs... ops) {
      BOTAN_ASSERT(has_equal_lengths(ops...), "all BitRangeOperators have the same length");

      if constexpr((BitRangeOperatorTs::is_byte_aligned() && ...)) {
         // Note: At the moment we can assume that this will always be used
         //       on the _entire_ bitvector. Therefore, we can safely assume
         //       that the bitvectors' underlying buffers are properly aligned.
         //       If this assumption changes, we need to add further handling
         //       to process a byte padding at the beginning of the bitvector
         //       until a memory alignment boundary is reached.
         const bool alignment = (ops.template is_memory_aligned_to<uint64_t>() && ...);
         BOTAN_ASSERT_NOMSG(alignment);

         // Progressively smaller block sizes mop up whatever the larger ones
         // could not consume; short-circuits if a predicate callback fails.
         return _process_in_fully_aligned_blocks_of<uint64_t>(fn, ops...) &&
                _process_in_fully_aligned_blocks_of<uint32_t>(fn, ops...) &&
                _process_in_fully_aligned_blocks_of<uint16_t>(fn, ops...) &&
                _process_in_unaligned_blocks_of<uint8_t>(fn, ops...);
      } else {
         // Unaligned ranges must assemble each block from individual bytes.
         return _process_in_unaligned_blocks_of<uint64_t>(fn, ops...) &&
                _process_in_unaligned_blocks_of<uint32_t>(fn, ops...) &&
                _process_in_unaligned_blocks_of<uint16_t>(fn, ops...) &&
                _process_in_unaligned_blocks_of<uint8_t>(fn, ops...);
      }
   }
1270
   /**
    * Apply @p fn to all bit blocks in the bitvector(s).
    *
    * All passed bitvectors must have the same length (asserted); the entire
    * range of each one is wrapped into a BitRangeOperator and processed
    * blockwise by range_operation().
    */
   template <typename FnT, typename... BitvectorTs>
      requires(detail::blockwise_processing_callback<FnT, BitvectorTs...> &&
               (is_bitvector_v<std::remove_cvref_t<BitvectorTs>> && ... && true))
   static bool full_range_operation(FnT&& fn, BitvectorTs&... bitvecs) {
      BOTAN_ASSERT(has_equal_lengths(bitvecs...), "all bitvectors have the same length");
      return range_operation(std::forward<FnT>(fn), BitRangeOperator<BitvectorTs>(bitvecs)...);
   }
1281
1282 template <typename SomeT, typename... SomeTs>
1283 static bool has_equal_lengths(const SomeT& v, const SomeTs&... vs) {
1284 return ((v.size() == vs.size()) && ... && true);
1285 }
1286
   /**
    * Creates a mask with the @p bits least significant bits set, without any
    * data-dependent branches. If @p bits is at least the width of T, all bits
    * of the result are set.
    */
   template <std::unsigned_integral T>
   static constexpr T make_mask(size_type bits) {
      // max == true means the mask must cover the full width of T
      const bool max = bits >= sizeof(T) * 8;
      // If max: T(max - 1) == 0, so bits becomes 0 (keeps the shift below
      // well-defined); otherwise T(-1) is all-ones and bits is unchanged.
      bits &= T(max - 1);
      // If max: (T(0) << 0) - 1 wraps around to all-ones;
      // otherwise: (T(1) << bits) - 1 sets the low `bits` bits.
      return (T(!max) << bits) - 1;
   }
1293
1294 auto as_byte_span() { return std::span{m_blocks.data(), m_blocks.size() * sizeof(block_type)}; }
1295
1296 auto as_byte_span() const { return std::span{m_blocks.data(), m_blocks.size() * sizeof(block_type)}; }
1297
   private:
      size_type m_bits;                                  // number of valid bits stored in the vector
      std::vector<block_type, allocator_type> m_blocks;  // backing storage, in block_type words
};
1302
1305
1306namespace detail {
1307
1308/**
1309 * If one of the allocators is a Botan::secure_allocator, this will always
1310 * prefer it. Otherwise, the allocator of @p lhs will be used as a default.
1311 */
1312template <bitvectorish T1, bitvectorish T2>
1313constexpr auto copy_lhs_allocator_aware(const T1& lhs, const T2& /*rhs*/) {
1314 constexpr bool needs_secure_allocator =
1316
1317 if constexpr(needs_secure_allocator) {
1318 return lhs.template as<secure_bitvector>();
1319 } else {
1320 return lhs.template as<bitvector>();
1321 }
1322}
1323
1324} // namespace detail
1325
1326template <bitvectorish T1, bitvectorish T2>
1327auto operator|(const T1& lhs, const T2& rhs) {
1328 auto res = detail::copy_lhs_allocator_aware(lhs, rhs);
1329 res |= rhs;
1330 return res;
1331}
1332
1333template <bitvectorish T1, bitvectorish T2>
1334auto operator&(const T1& lhs, const T2& rhs) {
1335 auto res = detail::copy_lhs_allocator_aware(lhs, rhs);
1336 res &= rhs;
1337 return res;
1338}
1339
1340template <bitvectorish T1, bitvectorish T2>
1341auto operator^(const T1& lhs, const T2& rhs) {
1342 auto res = detail::copy_lhs_allocator_aware(lhs, rhs);
1343 res ^= rhs;
1344 return res;
1345}
1346
/**
 * Equality of two bitvector-ish values; delegates to equals_vartime(), i.e.
 * this comparison does not run in constant time.
 */
template <bitvectorish T1, bitvectorish T2>
bool operator==(const T1& lhs, const T2& rhs) {
   return lhs.equals_vartime(rhs);
}
1351
1352template <bitvectorish T1, bitvectorish T2>
1353bool operator!=(const T1& lhs, const T2& rhs) {
1354 return lhs.equals_vartime(rhs);
1355}
1356
1357namespace detail {
1358
1359/**
1360 * A Strong<> adapter for arbitrarily large bitvectors
1361 */
1362template <concepts::container T>
1363 requires is_bitvector_v<T>
1365 public:
1366 using size_type = typename T::size_type;
1367
1368 public:
1370
1371 auto at(size_type i) const { return this->get().at(i); }
1372
1373 auto at(size_type i) { return this->get().at(i); }
1374
1375 auto set(size_type i) { return this->get().set(i); }
1376
1377 auto unset(size_type i) { return this->get().unset(i); }
1378
1379 auto flip(size_type i) { return this->get().flip(i); }
1380
1381 auto flip() { return this->get().flip(); }
1382
1383 template <typename OutT>
1384 auto as() const {
1385 return this->get().template as<OutT>();
1386 }
1387
1388 template <bitvectorish OutT = T>
1389 auto subvector(size_type pos, std::optional<size_type> length = std::nullopt) const {
1390 return this->get().template subvector<OutT>(pos, length);
1391 }
1392
1393 template <typename OutT>
1394 requires(std::unsigned_integral<strong_type_wrapped_type<OutT>> &&
1395 !std::same_as<bool, strong_type_wrapped_type<OutT>>)
1396 auto subvector(size_type pos) const {
1397 return this->get().template subvector<OutT>(pos);
1398 }
1399
1400 template <typename InT>
1401 requires(std::unsigned_integral<strong_type_wrapped_type<InT>> && !std::same_as<bool, InT>)
1402 void subvector_replace(size_type pos, InT value) {
1403 return this->get().subvector_replace(pos, value);
1404 }
1405
1406 template <bitvectorish OtherT>
1407 auto equals(const OtherT& other) const {
1408 return this->get().equals(other);
1409 }
1410
1411 auto push_back(bool b) { return this->get().push_back(b); }
1412
1413 auto pop_back() { return this->get().pop_back(); }
1414
1415 auto front() const { return this->get().front(); }
1416
1417 auto front() { return this->get().front(); }
1418
1419 auto back() const { return this->get().back(); }
1420
1421 auto back() { return this->get().back(); }
1422
1423 auto any_vartime() const { return this->get().any_vartime(); }
1424
1425 auto all_vartime() const { return this->get().all_vartime(); }
1426
1427 auto none_vartime() const { return this->get().none_vartime(); }
1428
1429 auto has_odd_hamming_weight() const { return this->get().has_odd_hamming_weight(); }
1430
1431 auto hamming_weight() const { return this->get().hamming_weight(); }
1432
1433 auto from_bytes(std::span<const uint8_t> bytes, std::optional<size_type> bits = std::nullopt) {
1434 return this->get().from_bytes(bytes, bits);
1435 }
1436
1437 template <typename OutT = T>
1438 auto to_bytes() const {
1439 return this->get().template to_bytes<OutT>();
1440 }
1441
1442 auto to_bytes(std::span<uint8_t> out) const { return this->get().to_bytes(out); }
1443
1444 auto to_string() const { return this->get().to_string(); }
1445
1446 auto capacity() const { return this->get().capacity(); }
1447
1448 auto reserve(size_type n) { return this->get().reserve(n); }
1449
1450 constexpr void _const_time_poison() const { this->get()._const_time_poison(); }
1451
1452 constexpr void _const_time_unpoison() const { this->get()._const_time_unpoison(); }
1453};
1454
1455} // namespace detail
1456
1457} // namespace Botan
1458
1459#endif
#define BOTAN_ASSERT_NOMSG(expr)
Definition assert.h:75
#define BOTAN_DEBUG_ASSERT(expr)
Definition assert.h:129
#define BOTAN_ARG_CHECK(expr, msg)
Definition assert.h:33
#define BOTAN_ASSERT(expr, assertion_made)
Definition assert.h:62
static constexpr Choice from_int(T v)
Definition ct_utils.h:314
static constexpr Mask< T > expand(T v)
Definition ct_utils.h:420
static constexpr Mask< T > from_choice(Choice c)
Definition ct_utils.h:430
bitref(bitref &&) noexcept=default
constexpr bitref & operator&=(bool other) noexcept
Definition bitvector.h:358
constexpr bitref & operator=(bitref &&bit) noexcept
Definition bitvector.h:354
constexpr bitref & flip() noexcept
Definition bitvector.h:339
constexpr bitref & operator=(bool bit) noexcept
Definition bitvector.h:346
constexpr bitref & operator=(const bitref &bit) noexcept
Definition bitvector.h:352
constexpr bitref & operator^=(bool other) noexcept
Definition bitvector.h:368
constexpr bitref & unset() noexcept
Definition bitvector.h:334
bitref(const bitref &) noexcept=default
constexpr bitref & set() noexcept
Definition bitvector.h:329
constexpr bitref & operator|=(bool other) noexcept
Definition bitvector.h:363
const_iterator begin() const noexcept
Definition bitvector.h:893
bitvector_base & flip()
Definition bitvector.h:647
bitvector_base & flip(size_type pos)
Definition bitvector.h:638
bitvector_base & unset()
Definition bitvector.h:627
void reserve(size_type bits)
Definition bitvector.h:541
bitvector_base & unset(size_type pos)
Definition bitvector.h:618
bool none() const
Definition bitvector.h:663
auto at(size_type pos) const
Definition bitvector.h:576
auto operator[](size_type pos) const
Definition bitvector.h:691
CT::Choice has_odd_hamming_weight() const
Definition bitvector.h:408
void subvector_replace(size_type pos, InT value)
Definition bitvector.h:779
constexpr void _const_time_unpoison() const
Definition bitvector.h:883
std::string to_string() const
Definition bitvector.h:526
bitvector_base & set(size_type pos)
Definition bitvector.h:595
static constexpr size_type block_size_bytes
Definition bitvector.h:241
void from_bytes(std::span< const uint8_t > bytes, std::optional< size_type > bits=std::nullopt)
Definition bitvector.h:467
bitvector_base(size_type bits)
Definition bitvector.h:377
size_type hamming_weight() const
Definition bitvector.h:423
auto & operator&=(const OtherT &other)
Definition bitvector.h:828
block_type value_type
Definition bitvector.h:237
const_iterator cbegin() const noexcept
Definition bitvector.h:895
bitvector_base & set()
Definition bitvector.h:604
static constexpr bool uses_secure_allocator
Definition bitvector.h:243
detail::bitvector_iterator< bitvector_base< AllocatorT > > iterator
Definition bitvector.h:238
void ct_conditional_xor(CT::Choice condition, const OtherT &other)
Definition bitvector.h:859
bool none_vartime() const
Definition bitvector.h:656
AllocatorT< block_type > allocator_type
Definition bitvector.h:236
auto operator[](size_type pos)
Definition bitvector.h:688
bool all_vartime() const
Definition bitvector.h:678
void to_bytes(std::span< uint8_t > out) const
Definition bitvector.h:504
size_type capacity() const
Definition bitvector.h:539
friend class bitvector_base
Definition bitvector.h:247
auto & operator|=(const OtherT &other)
Definition bitvector.h:820
auto subvector(size_type pos, std::optional< size_type > length=std::nullopt) const
Definition bitvector.h:703
auto at(size_type pos)
Definition bitvector.h:570
bool empty() const
Definition bitvector.h:401
static constexpr size_type block_size_bits
Definition bitvector.h:242
detail::bitvector_iterator< const bitvector_base< AllocatorT > > const_iterator
Definition bitvector.h:239
void resize(size_type bits)
Definition bitvector.h:543
const_iterator end() const noexcept
Definition bitvector.h:899
bitvector_base(std::span< const uint8_t > bytes, std::optional< size_type > bits=std::nullopt)
Definition bitvector.h:392
bool any_vartime() const
Definition bitvector.h:668
void push_back(bool bit)
Definition bitvector.h:553
auto back() const
Definition bitvector.h:589
constexpr void _const_time_poison() const
Definition bitvector.h:881
OutT to_bytes() const
Definition bitvector.h:493
OutT subvector(size_type pos) const
Definition bitvector.h:743
auto & operator^=(const OtherT &other)
Definition bitvector.h:836
bool equals(const OtherT &other) const noexcept
Definition bitvector.h:452
auto front() const
Definition bitvector.h:584
const_iterator cend() noexcept
Definition bitvector.h:901
bitvector_base(std::initializer_list< block_type > blocks, std::optional< size_type > bits=std::nullopt)
Definition bitvector.h:398
bool equals_vartime(const OtherT &other) const noexcept
Definition bitvector.h:441
typename T::size_type size_type
Definition bitvector.h:1366
auto equals(const OtherT &other) const
Definition bitvector.h:1407
constexpr void _const_time_poison() const
Definition bitvector.h:1450
void subvector_replace(size_type pos, InT value)
Definition bitvector.h:1402
auto subvector(size_type pos) const
Definition bitvector.h:1396
auto from_bytes(std::span< const uint8_t > bytes, std::optional< size_type > bits=std::nullopt)
Definition bitvector.h:1433
auto subvector(size_type pos, std::optional< size_type > length=std::nullopt) const
Definition bitvector.h:1389
Strong_Adapter(std::span< const typename Container_Strong_Adapter_Base< T >::value_type > span)
auto at(size_type i) const
Definition bitvector.h:1371
auto to_bytes(std::span< uint8_t > out) const
Definition bitvector.h:1442
constexpr void _const_time_unpoison() const
Definition bitvector.h:1452
constexpr T & get() &
Definition strong_type.h:52
std::bidirectional_iterator_tag iterator_category
Definition bitvector.h:134
bitvector_iterator & operator=(bitvector_iterator &&other) noexcept
Definition bitvector.h:158
std::remove_const_t< decltype(std::declval< T >().at(0))> value_type
Definition bitvector.h:129
std::partial_ordering operator<=>(const bitvector_iterator &other) const noexcept
Definition bitvector.h:186
bitvector_iterator & operator=(const bitvector_iterator &other) noexcept
Definition bitvector.h:150
bool operator==(const bitvector_iterator &other) const noexcept
Definition bitvector.h:194
bitvector_iterator operator--(int) noexcept
Definition bitvector.h:180
bitvector_iterator & operator++() noexcept
Definition bitvector.h:164
bitvector_iterator & operator--() noexcept
Definition bitvector.h:175
bitvector_iterator(bitvector_iterator &&other) noexcept
Definition bitvector.h:146
bitvector_iterator(T *bitvector, size_t offset)
Definition bitvector.h:140
std::make_signed_t< size_type > difference_type
Definition bitvector.h:128
bitvector_iterator operator++(int) noexcept
Definition bitvector.h:169
bitvector_iterator(const bitvector_iterator &other) noexcept
Definition bitvector.h:142
constexpr void unpoison(const T *p, size_t n)
Definition ct_utils.h:65
constexpr void poison(const T *p, size_t n)
Definition ct_utils.h:54
std::invoke_result_t< FnT, as< BlockT, ParamTs >... > blockwise_processing_callback_return_type
Definition bitvector.h:77
constexpr auto copy_lhs_allocator_aware(const T1 &lhs, const T2 &)
Definition bitvector.h:1313
typename first_type< Ts... >::type first_t
Definition bitvector.h:63
ASN1_Type operator|(ASN1_Type x, ASN1_Type y)
Definition asn1_obj.h:74
bitvector_base< secure_allocator > secure_bitvector
Definition bitvector.h:1303
constexpr void copy_mem(T *out, const T *in, size_t n)
Definition mem_ops.h:145
OctetString operator^(const OctetString &k1, const OctetString &k2)
Definition symkey.cpp:109
bitvector_base< std::allocator > bitvector
Definition bitvector.h:1304
constexpr uint8_t ceil_log2(T x)
Definition bit_ops.h:120
bool operator!=(const AlgorithmIdentifier &a1, const AlgorithmIdentifier &a2)
Definition alg_id.cpp:68
constexpr decltype(auto) unwrap_strong_type(T &&t)
Generically unwraps a strong type to its underlying type.
constexpr auto store_le(ParamTs &&... params)
Definition loadstor.h:736
typename detail::wrapped_type_helper< std::remove_cvref_t< T > >::type strong_type_wrapped_type
Extracts the wrapped type from a strong type.
bool operator==(const AlgorithmIdentifier &a1, const AlgorithmIdentifier &a2)
Definition alg_id.cpp:53
BOTAN_FORCE_INLINE constexpr T ceil_tobytes(T bits)
Definition bit_ops.h:155
BOTAN_FORCE_INLINE constexpr uint8_t ct_popcount(T x)
Definition bit_ops.h:253
constexpr decltype(auto) wrap_strong_type(ParamT &&t)
Wraps a value into a caller-defined (strong) type.
constexpr auto load_le(ParamTs &&... params)
Definition loadstor.h:495
constexpr void typecast_copy(ToR &&out, const FromR &in)
Definition mem_ops.h:177
constexpr auto bitlen(size_t x)
constexpr void clear_mem(T *ptr, size_t n)
Definition mem_ops.h:119
ECIES_Flags operator&(ECIES_Flags a, ECIES_Flags b)
Definition ecies.h:70