Botan 3.7.1
Crypto and TLS for C++
bitvector.h
Go to the documentation of this file.
1/*
2 * An abstraction for an arbitrarily large bitvector that can
3 * optionally use the secure_allocator. All bitwise accesses and all
4 * constructors are implemented in constant time. Otherwise, only methods
5 * with the "ct_" pre-fix run in constant time.
6 *
7 * (C) 2023-2024 Jack Lloyd
8 * (C) 2023-2024 René Meusel, Rohde & Schwarz Cybersecurity
9 *
10 * Botan is released under the Simplified BSD License (see license.txt)
11 */
12
13#ifndef BOTAN_BIT_VECTOR_H_
14#define BOTAN_BIT_VECTOR_H_
15
16#include <botan/concepts.h>
17#include <botan/exceptn.h>
18#include <botan/mem_ops.h>
19#include <botan/secmem.h>
20#include <botan/strong_type.h>
21#include <botan/internal/bit_ops.h>
22#include <botan/internal/ct_utils.h>
23#include <botan/internal/loadstor.h>
24#include <botan/internal/stl_util.h>
25
26#include <optional>
27#include <span>
28#include <sstream>
29#include <string>
30#include <utility>
31#include <vector>
32
33namespace Botan {
34
35template <template <typename> typename AllocatorT>
36class bitvector_base;
37
// Type trait: true iff T is an instantiation of bitvector_base.
template <typename T>
struct is_bitvector : std::false_type {};

template <template <typename> typename T>
struct is_bitvector<bitvector_base<T>> : std::true_type {};

template <typename T>
constexpr static bool is_bitvector_v = is_bitvector<T>::value;

// Satisfied by bitvector_base instantiations, including ones wrapped in a
// Botan strong type (the wrapper is peeled off before the trait check).
template <typename T>
concept bitvectorish = is_bitvector_v<strong_type_wrapped_type<T>>;
49
50namespace detail {
51
// Trait yielding the first type of a (non-empty) parameter pack.
template <typename T0, typename... Ts>
struct first_type {
      using type = T0;
};
56
57// get the first type from a parameter pack
58// TODO: C++26 will bring Parameter Pack indexing:
59// using first_t = Ts...[0];
60template <typename... Ts>
61 requires(sizeof...(Ts) > 0)
63
64// get the first object from a parameter pack
65// TODO: C++26 will bring Parameter Pack indexing:
66// auto first = s...[0];
// Returns its first argument unchanged; all further arguments are ignored.
template <typename T0, typename... Ts>
constexpr static first_t<T0, Ts...> first(T0&& t, Ts&&...) {
   return std::forward<T0>(t);
}

// Maps any type to OutT; used to expand a parameter pack into a homogeneous
// list of block types (one OutT per pack element).
template <typename OutT, typename>
using as = OutT;

// Result type of invoking FnT with one BlockT argument per element of ParamTs.
template <typename FnT, std::unsigned_integral BlockT, typename... ParamTs>
using blockwise_processing_callback_return_type = std::invoke_result_t<FnT, as<BlockT, ParamTs>...>;
77
78template <typename FnT, typename BlockT, typename... ParamTs>
80 std::unsigned_integral<BlockT> &&
81 (std::same_as<BlockT, blockwise_processing_callback_return_type<FnT, BlockT, ParamTs...>> ||
82 std::same_as<bool, blockwise_processing_callback_return_type<FnT, BlockT, ParamTs...>> ||
83 std::same_as<void, blockwise_processing_callback_return_type<FnT, BlockT, ParamTs...>>);
84
85template <typename FnT, typename... ParamTs>
87 is_blockwise_processing_callback_return_type<FnT, uint8_t, ParamTs...> &&
88 is_blockwise_processing_callback_return_type<FnT, uint16_t, ParamTs...> &&
89 is_blockwise_processing_callback_return_type<FnT, uint32_t, ParamTs...> &&
90 is_blockwise_processing_callback_return_type<FnT, uint64_t, ParamTs...>;
91
92template <typename FnT, typename... ParamTs>
94 is_blockwise_processing_callback_return_type<FnT, uint8_t, ParamTs..., uint8_t /* mask */> &&
95 is_blockwise_processing_callback_return_type<FnT, uint16_t, ParamTs..., uint16_t /* mask */> &&
96 is_blockwise_processing_callback_return_type<FnT, uint32_t, ParamTs..., uint32_t /* mask */> &&
97 is_blockwise_processing_callback_return_type<FnT, uint64_t, ParamTs..., uint64_t /* mask */>;
98
99/**
100 * Defines the callback constraints for the BitRangeOperator. For further
101 * details, see bitvector_base::range_operation().
102 */
103template <typename FnT, typename... ParamTs>
106
107template <typename FnT, typename... ParamTs>
110 std::same_as<uint32_t, blockwise_processing_callback_return_type<FnT, uint32_t, ParamTs...>>) ||
112 std::same_as<uint32_t, blockwise_processing_callback_return_type<FnT, uint32_t, ParamTs..., first_t<ParamTs...>>>);
113
114template <typename FnT, typename... ParamTs>
117 std::same_as<bool, blockwise_processing_callback_return_type<FnT, uint32_t, ParamTs...>>) ||
119 std::same_as<bool, blockwise_processing_callback_return_type<FnT, uint32_t, ParamTs..., first_t<ParamTs...>>>);
120
121template <typename T>
123 private:
124 using size_type = typename T::size_type;
125
126 public:
127 using difference_type = std::make_signed_t<size_type>;
128 using value_type = std::remove_const_t<decltype(std::declval<T>().at(0))>;
131
132 // TODO: technically, this could be a random access iterator
133 using iterator_category = std::bidirectional_iterator_tag;
134
135 public:
138
139 bitvector_iterator(T* bitvector, size_t offset) : m_bitvector(bitvector) { update(offset); }
140
141 bitvector_iterator(const bitvector_iterator& other) noexcept : m_bitvector(other.m_bitvector) {
142 update(other.m_offset);
143 }
144
145 bitvector_iterator(bitvector_iterator&& other) noexcept : m_bitvector(other.m_bitvector) {
146 update(other.m_offset);
147 }
148
150 if(this != &other) {
151 m_bitvector = other.m_bitvector;
152 update(other.m_offset);
153 }
154 return *this;
155 }
156
158 m_bitvector = other.m_bitvector;
159 update(other.m_offset);
160 return *this;
161 }
162
164 update(signed_offset() + 1);
165 return *this;
166 }
167
169 auto copy = *this;
170 update(signed_offset() + 1);
171 return copy;
172 }
173
175 update(signed_offset() - 1);
176 return *this;
177 }
178
180 auto copy = *this;
181 update(signed_offset() - 1);
182 return copy;
183 }
184
      // Iterators into different bitvectors are incomparable (unordered).
      std::partial_ordering operator<=>(const bitvector_iterator& other) const noexcept {
         if(m_bitvector == other.m_bitvector) {
            return m_offset <=> other.m_offset;
         } else {
            return std::partial_ordering::unordered;
         }
      }

      bool operator==(const bitvector_iterator& other) const noexcept {
         return m_bitvector == other.m_bitvector && m_offset == other.m_offset;
      }

      // Dereferencing an end() iterator throws std::bad_optional_access,
      // since the cached bit proxy is disengaged past the last bit.
      reference operator*() const { return m_bitref.value(); }

      pointer operator->() const { return &(m_bitref.value()); }

   private:
      // Moves the iterator to @p new_offset and re-binds the cached bit
      // proxy; an offset at or past size() leaves the proxy empty.
      void update(size_type new_offset) {
         m_offset = new_offset;
         if(m_offset < m_bitvector->size()) {
            m_bitref.emplace((*m_bitvector)[m_offset]);
         } else {
            // end() iterator
            m_bitref.reset();
         }
      }

      difference_type signed_offset() const { return static_cast<difference_type>(m_offset); }
213
214 private:
215 T* m_bitvector;
216 size_type m_offset;
217 mutable std::optional<value_type> m_bitref;
218};
219
220} // namespace detail
221
222/**
223 * An arbitrarily large bitvector with typical bit manipulation and convenient
224 * bitwise access methods. Don't use `bitvector_base` directly, but the type
225 * aliases::
226 *
227 * * bitvector - with a standard allocator
228 * * secure_bitvector - with a secure allocator that auto-scrubs the memory
229 */
230template <template <typename> typename AllocatorT>
232 public:
233 using block_type = uint8_t;
234 using size_type = size_t;
235 using allocator_type = AllocatorT<block_type>;
239
240 static constexpr size_type block_size_bytes = sizeof(block_type);
242 static constexpr bool uses_secure_allocator = std::is_same_v<allocator_type, secure_allocator<block_type>>;
243
244 private:
245 template <template <typename> typename FriendAllocatorT>
246 friend class bitvector_base;
247
248 static constexpr block_type one = block_type(1);
249
250 static constexpr size_type block_offset_shift = size_type(3) + ceil_log2(block_size_bytes);
251 static constexpr size_type block_index_mask = (one << block_offset_shift) - 1;
252
253 static constexpr size_type block_index(size_type pos) { return pos >> block_offset_shift; }
254
255 static constexpr size_type block_offset(size_type pos) { return pos & block_index_mask; }
256
257 private:
258 /**
259 * Internal helper to wrap a single bit in the bitvector and provide
260 * certain convenience access methods.
261 */
      template <typename BlockT>
         requires std::same_as<block_type, std::remove_cv_t<BlockT>>
      class bitref_base {
         private:
            friend class bitvector_base<AllocatorT>;

            // Binds to the block containing bit @p pos and precomputes the
            // single-bit mask for that position. No bounds checking here.
            constexpr bitref_base(std::span<BlockT> blocks, size_type pos) noexcept :
                  m_block(blocks[block_index(pos)]), m_mask(one << block_offset(pos)) {}

         public:
            bitref_base() = delete;
            bitref_base(const bitref_base&) noexcept = default;
            bitref_base(bitref_base&&) noexcept = default;
            // Assignment is deleted: this is a reference-like proxy, and
            // rebinding would be ambiguous with assigning the bit value
            // (value assignment exists only on the mutable bitref subclass).
            bitref_base& operator=(const bitref_base&) = delete;
            bitref_base& operator=(bitref_base&&) = delete;

            ~bitref_base() = default;

         public:
            // True iff the referenced bit is set.
            constexpr operator bool() const noexcept { return is_set(); }

            constexpr bool is_set() const noexcept { return (m_block & m_mask) > 0; }

            // The bit's value as 0 or 1 of an arbitrary integral type.
            template <std::integral T>
            constexpr T as() const noexcept {
               return static_cast<T>(is_set());
            }

            // Constant-time choice representing the bit's value.
            constexpr CT::Choice as_choice() const noexcept {
               return CT::Choice::from_int(static_cast<BlockT>(m_block & m_mask));
            }

         protected:
            BlockT& m_block;  // NOLINT(*-non-private-member-variables-in-classes)
            BlockT m_mask;    // NOLINT(*-non-private-member-variables-in-classes)
      };
298
299 public:
300 /**
301 * Wraps a constant reference into the bitvector. Bit can be accessed
302 * but not modified.
303 */
304 template <typename BlockT>
305 class bitref final : public bitref_base<BlockT> {
306 public:
307 using bitref_base<BlockT>::bitref_base;
308 };
309
310 /**
311 * Wraps a modifiable reference into the bitvector. Bit may be accessed
312 * and modified (e.g. flipped or XOR'ed).
313 *
314 * Constant-time operations are used for the bit manipulations. The
315 * location of the bit in the bit vector may be leaked, though.
316 */
      template <typename BlockT>
         requires(!std::is_const_v<BlockT>)
      class bitref<BlockT> : public bitref_base<BlockT> {
         public:
            using bitref_base<BlockT>::bitref_base;

            ~bitref() = default;
            bitref(const bitref&) noexcept = default;
            bitref(bitref&&) noexcept = default;

            // Sets the referenced bit to 1.
            constexpr bitref& set() noexcept {
               this->m_block |= this->m_mask;
               return *this;
            }

            // Sets the referenced bit to 0.
            constexpr bitref& unset() noexcept {
               this->m_block &= ~this->m_mask;
               return *this;
            }

            // Inverts the referenced bit.
            constexpr bitref& flip() noexcept {
               this->m_block ^= this->m_mask;
               return *this;
            }

            // NOLINTBEGIN

            // Assigns @p bit without a value-dependent branch: a constant-time
            // mask selects between "block with bit set" and "block with bit
            // cleared".
            constexpr bitref& operator=(bool bit) noexcept {
               this->m_block =
                  CT::Mask<BlockT>::expand(bit).select(this->m_mask | this->m_block, this->m_block & ~this->m_mask);
               return *this;
            }

            // Copy/move assignment copies the referenced bit's *value*; it
            // does not rebind the proxy to another position.
            constexpr bitref& operator=(const bitref& bit) noexcept { return *this = bit.is_set(); }

            constexpr bitref& operator=(bitref&& bit) noexcept { return *this = bit.is_set(); }

            // NOLINTEND

            // In-place AND with @p other, in constant time w.r.t. the values.
            constexpr bitref& operator&=(bool other) noexcept {
               this->m_block &= ~CT::Mask<BlockT>::expand(other).if_not_set_return(this->m_mask);
               return *this;
            }

            // In-place OR with @p other, in constant time w.r.t. the values.
            constexpr bitref& operator|=(bool other) noexcept {
               this->m_block |= CT::Mask<BlockT>::expand(other).if_set_return(this->m_mask);
               return *this;
            }

            // In-place XOR with @p other, in constant time w.r.t. the values.
            constexpr bitref& operator^=(bool other) noexcept {
               this->m_block ^= CT::Mask<BlockT>::expand(other).if_set_return(this->m_mask);
               return *this;
            }
      };
371
372 public:
      // Creates an empty bitvector (size() == 0).
      bitvector_base() : m_bits(0) {}

      // Creates a bitvector with @p bits zero-initialized bits.
      bitvector_base(size_type bits) : m_bits(bits), m_blocks(ceil_toblocks(bits)) {}

      /**
       * Initialize the bitvector from a byte-array. Bits are taken byte-wise
       * from least significant to most significant. Example::
       *
       *    bitvector[0] -> LSB(Byte[0])
       *    bitvector[1] -> LSB+1(Byte[0])
       *    ...
       *    bitvector[8] -> LSB(Byte[1])
       *
       * @param bytes The byte vector to be loaded
       * @param bits  The number of bits to be loaded. This must not be more
       *              than the number of bits in @p bytes.
       */
      bitvector_base(std::span<const uint8_t> bytes, std::optional<size_type> bits = std::nullopt) {
         from_bytes(bytes, bits);
      }

      // Initializes from raw blocks; if @p bits is not given, every bit of
      // every block is part of the bitvector.
      bitvector_base(std::initializer_list<block_type> blocks, std::optional<size_type> bits = std::nullopt) :
            m_bits(bits.value_or(blocks.size() * block_size_bits)), m_blocks(blocks.begin(), blocks.end()) {}

      bool empty() const { return m_bits == 0; }

      size_type size() const { return m_bits; }
400
401 /**
402 * @returns true iff the number of 1-bits in this is odd, false otherwise (constant time)
403 */
405 uint64_t acc = 0;
406 full_range_operation([&](std::unsigned_integral auto block) { acc ^= block; }, *this);
407
408 for(size_t i = (sizeof(acc) * 8) >> 1; i > 0; i >>= 1) {
409 acc ^= acc >> i;
410 }
411
412 return CT::Choice::from_int(acc & one);
413 }
414
415 /**
416 * Counts the number of 1-bits in the bitvector in constant time.
417 * @returns the "population count" (or hamming weight) of the bitvector
418 */
420 size_type acc = 0;
421 full_range_operation([&](std::unsigned_integral auto block) { acc += ct_popcount(block); }, *this);
422 return acc;
423 }
424
425 /**
426 * @returns copies this bitvector into a new bitvector of type @p OutT
427 */
      template <bitvectorish OutT>
      OutT as() const {
         // Copying the full range is delegated to subvector().
         return subvector<OutT>(0, size());
      }

      /**
       * @returns true if @p other contains the same bit pattern as this.
       * Not guaranteed to run in constant time (no "ct_" prefix).
       */
      template <bitvectorish OtherT>
      bool equals_vartime(const OtherT& other) const noexcept {
         return size() == other.size() &&
                full_range_operation([]<std::unsigned_integral BlockT>(BlockT lhs, BlockT rhs) { return lhs == rhs; },
                                     *this,
                                     unwrap_strong_type(other));
      }

      /**
       * @returns true if @p other contains the same bit pattern as this.
       * Compares via XOR + hamming weight; see none() for the CT guarantee.
       */
      template <bitvectorish OtherT>
      bool equals(const OtherT& other) const noexcept {
         return (*this ^ other).none();
      }
451
452 /// @name Serialization
453 /// @{
454
455 /**
456 * Re-initialize the bitvector with the given bytes. See the respective
457 * constructor for details. This should be used only when trying to save
458 * allocations. Otherwise, use the constructor.
459 *
460 * @param bytes the byte range to load bits from
461 * @param bits (optional) if not all @p bytes should be loaded in full
462 */
      void from_bytes(std::span<const uint8_t> bytes, std::optional<size_type> bits = std::nullopt) {
         m_bits = bits.value_or(bytes.size_bytes() * 8);
         BOTAN_ARG_CHECK(m_bits <= bytes.size_bytes() * 8, "not enough data to load so many bits");
         resize(m_bits);

         // load as much aligned data as possible (whole blocks copied verbatim)
         const auto verbatim_blocks = m_bits / block_size_bits;
         const auto verbatim_bytes = verbatim_blocks * block_size_bytes;
         if(verbatim_blocks > 0) {
            typecast_copy(std::span{m_blocks}.first(verbatim_blocks), bytes.first(verbatim_bytes));
         }

         // load remaining unaligned data bit-by-bit, LSB-first within a byte
         // (matching the layout documented on the constructor)
         for(size_type i = verbatim_bytes * 8; i < m_bits; ++i) {
            ref(i) = ((bytes[i >> 3] & (uint8_t(1) << (i & 7))) != 0);
         }
      }
480
481 /**
482 * Renders the bitvector into a byte array. By default, this will use
483 * `std::vector<uint8_t>` or `Botan::secure_vector<uint8_t>`, depending on
484 * the allocator used by the bitvector. The rendering is compatible with
485 * the bit layout explained in the respective constructor.
486 */
487 template <concepts::resizable_byte_buffer OutT =
488 std::conditional_t<uses_secure_allocator, secure_vector<uint8_t>, std::vector<uint8_t>>>
489 OutT to_bytes() const {
490 OutT out(ceil_tobytes(m_bits));
491 to_bytes(out);
492 return out;
493 }
494
495 /**
496 * Renders the bitvector into a properly sized byte range.
497 *
498 * @param out a byte range that has a length of at least `ceil_tobytes(size())`.
499 */
      void to_bytes(std::span<uint8_t> out) const {
         const auto bytes_needed = ceil_tobytes(m_bits);
         BOTAN_ARG_CHECK(bytes_needed <= out.size_bytes(), "Not enough space to render bitvector");

         // copy as much aligned data as possible (whole blocks)
         const auto verbatim_blocks = m_bits / block_size_bits;
         const auto verbatim_bytes = verbatim_blocks * block_size_bytes;
         if(verbatim_blocks > 0) {
            typecast_copy(out.first(verbatim_bytes), std::span{m_blocks}.first(verbatim_blocks));
         }

         // copy remaining unaligned data; the tail of @p out (everything
         // past the verbatim bytes) is zeroed first so the bit-wise OR
         // below starts from a clean slate
         clear_mem(out.subspan(verbatim_bytes));
         for(size_type i = verbatim_bytes * 8; i < m_bits; ++i) {
            out[i >> 3] |= ref(i).template as<uint8_t>() << (i & 7);
         }
      }
517
518 /**
519 * Renders this bitvector into a sequence of "0"s and "1"s.
520 * This is meant for debugging purposes and is not efficient.
521 */
522 std::string to_string() const {
523 std::stringstream ss;
524 for(size_type i = 0; i < size(); ++i) {
525 ss << ref(i);
526 }
527 return ss.str();
528 }
529
530 /// @}
531
532 /// @name Capacity Accessors and Modifiers
533 /// @{
534
      // Number of bits that fit in the currently allocated storage.
      size_type capacity() const { return m_blocks.capacity() * block_size_bits; }

      void reserve(size_type bits) { m_blocks.reserve(ceil_toblocks(bits)); }

      // Grows or shrinks to exactly @p bits. Newly added bits are zero;
      // stale storage bits beyond the new size are cleared so they cannot
      // leak into block-wise operations.
      void resize(size_type bits) {
         const auto new_number_of_blocks = ceil_toblocks(bits);
         if(new_number_of_blocks != m_blocks.size()) {
            m_blocks.resize(new_number_of_blocks);
         }

         m_bits = bits;
         zero_unused_bits();
      }

      // Appends @p bit at the end (may reallocate).
      void push_back(bool bit) {
         const auto i = size();
         resize(i + 1);
         ref(i) = bit;
      }

      // Removes the last bit; no-op on an empty bitvector.
      void pop_back() {
         if(!empty()) {
            resize(size() - 1);
         }
      }
560
561 /// @}
562
563 /// @name Bitwise and Global Accessors and Modifiers
564 /// @{
565
566 auto at(size_type pos) {
567 check_offset(pos);
568 return ref(pos);
569 }
570
571 // TODO C++23: deducing this
572 auto at(size_type pos) const {
573 check_offset(pos);
574 return ref(pos);
575 }
576
577 auto front() { return ref(0); }
578
579 // TODO C++23: deducing this
580 auto front() const { return ref(0); }
581
582 auto back() { return ref(size() - 1); }
583
584 // TODO C++23: deducing this
585 auto back() const { return ref(size() - 1); }
586
587 /**
588 * Sets the bit at position @p pos.
589 * @throws Botan::Invalid_Argument if @p pos is out of range
590 */
592 check_offset(pos);
593 ref(pos).set();
594 return *this;
595 }
596
597 /**
598 * Sets all currently allocated bits.
599 */
601 full_range_operation(
602 [](std::unsigned_integral auto block) -> decltype(block) { return static_cast<decltype(block)>(~0); },
603 *this);
604 zero_unused_bits();
605 return *this;
606 }
607
608 /**
609 * Unsets the bit at position @p pos.
610 * @throws Botan::Invalid_Argument if @p pos is out of range
611 */
613 check_offset(pos);
614 ref(pos).unset();
615 return *this;
616 }
617
618 /**
619 * Unsets all currently allocated bits.
620 */
622 full_range_operation(
623 [](std::unsigned_integral auto block) -> decltype(block) { return static_cast<decltype(block)>(0); },
624 *this);
625 return *this;
626 }
627
628 /**
629 * Flips the bit at position @p pos.
630 * @throws Botan::Invalid_Argument if @p pos is out of range
631 */
633 check_offset(pos);
634 ref(pos).flip();
635 return *this;
636 }
637
638 /**
639 * Flips all currently allocated bits.
640 */
642 full_range_operation([](std::unsigned_integral auto block) -> decltype(block) { return ~block; }, *this);
643 zero_unused_bits();
644 return *this;
645 }
646
647 /**
648 * @returns true iff no bit is set
649 */
650 bool none_vartime() const {
651 return full_range_operation([](std::unsigned_integral auto block) { return block == 0; }, *this);
652 }
653
654 /**
655 * @returns true iff no bit is set in constant time
656 */
657 bool none() const { return hamming_weight() == 0; }
658
659 /**
660 * @returns true iff at least one bit is set
661 */
662 bool any_vartime() const { return !none_vartime(); }
663
664 /**
665 * @returns true iff at least one bit is set in constant time
666 */
667 bool any() const { return !none(); }
668
669 /**
670 * @returns true iff all bits are set
671 */
672 bool all_vartime() const {
673 return full_range_operation(
674 []<std::unsigned_integral BlockT>(BlockT block, BlockT mask) { return block == mask; }, *this);
675 }
676
677 /**
678 * @returns true iff all bits are set in constant time
679 */
680 bool all() const { return hamming_weight() == m_bits; }
681
682 auto operator[](size_type pos) { return ref(pos); }
683
684 // TODO C++23: deducing this
685 auto operator[](size_type pos) const { return ref(pos); }
686
687 /// @}
688
689 /// @name Subvectors
690 /// @{
691
692 /**
693 * Creates a new bitvector with a subsection of this bitvector starting at
694 * @p pos copying exactly @p length bits.
695 */
      template <bitvectorish OutT = bitvector_base<AllocatorT>>
      auto subvector(size_type pos, std::optional<size_type> length = std::nullopt) const {
         // Default: copy everything from @p pos to the end.
         size_type bitlen = length.value_or(size() - pos);
         BOTAN_ARG_CHECK(pos + bitlen <= size(), "Not enough bits to copy");

         OutT newvector(bitlen);

         // Handle bitvectors that are wrapped in strong types
         auto& newvector_unwrapped = unwrap_strong_type(newvector);

         if(bitlen > 0) {
            if(pos % 8 == 0) {
               // Byte-aligned source: copy the covering blocks verbatim.
               copy_mem(
                  newvector_unwrapped.m_blocks,
                  std::span{m_blocks}.subspan(block_index(pos), block_index(pos + bitlen - 1) - block_index(pos) + 1));
            } else {
               // Unaligned source: shift bits into place block-by-block.
               BitRangeOperator<const bitvector_base<AllocatorT>, BitRangeAlignment::no_alignment> from_op(
                  *this, pos, bitlen);
               BitRangeOperator<strong_type_wrapped_type<OutT>> to_op(
                  unwrap_strong_type(newvector_unwrapped), 0, bitlen);
               range_operation([](auto /* to */, auto from) { return from; }, to_op, from_op);
            }

            // The verbatim block copy may bring in bits beyond the requested
            // length; clear them.
            newvector_unwrapped.zero_unused_bits();
         }

         return newvector;
      }
724
725 /**
726 * Extracts a subvector of bits as an unsigned integral type @p OutT
727 * starting from bit @p pos and copying exactly sizeof(OutT)*8 bits.
728 *
729 * Hint: The bits are in big-endian order, i.e. the least significant bit
730 * is the 0th bit and the most significant bit it the n-th. Hence,
731 * addressing the bits with bitwise operations is done like so:
732 * bool bit = (out_int >> pos) & 1;
733 */
      template <typename OutT>
         requires(std::unsigned_integral<strong_type_wrapped_type<OutT>> &&
                  !std::same_as<bool, strong_type_wrapped_type<OutT>>)
      OutT subvector(size_type pos) const {
         using result_t = strong_type_wrapped_type<OutT>;
         constexpr size_t bits = sizeof(result_t) * 8;
         BOTAN_ARG_CHECK(pos + bits <= size(), "Not enough bits to copy");
         result_t out = 0;

         if(pos % 8 == 0) {
            // Byte-aligned: read sizeof(result_t) bytes little-endian.
            out = load_le<result_t>(std::span{m_blocks}.subspan(block_index(pos)).template first<sizeof(result_t)>());
         } else {
            // Unaligned: the range operator assembles the bits; the callback
            // captures only the block whose width matches result_t.
            BitRangeOperator<const bitvector_base<AllocatorT>, BitRangeAlignment::no_alignment> op(*this, pos, bits);
            range_operation(
               [&](std::unsigned_integral auto integer) {
                  if constexpr(std::same_as<result_t, decltype(integer)>) {
                     out = integer;
                  }
               },
               op);
         }

         return wrap_strong_type<OutT>(out);
      }
758
759 /**
760 * Replaces a subvector of bits with the bits of another bitvector @p value
761 * starting at bit @p pos. The number of bits to replace is determined by
762 * the size of @p value.
763 *
764 * @note This is currently supported for byte-aligned @p pos only.
765 *
766 * @throws Not_Implemented when called with @p pos not divisible by 8.
767 *
768 * @param pos the position to start replacing bits
769 * @param value the bitvector to copy bits from
770 */
771 template <typename InT>
772 requires(std::unsigned_integral<strong_type_wrapped_type<InT>> && !std::same_as<bool, InT>)
773 void subvector_replace(size_type pos, InT value) {
775 constexpr size_t bits = sizeof(in_t) * 8;
776 BOTAN_ARG_CHECK(pos + bits <= size(), "Not enough bits to replace");
777
778 if(pos % 8 == 0) {
779 store_le(std::span{m_blocks}.subspan(block_index(pos)).template first<sizeof(in_t)>(),
780 unwrap_strong_type(value));
781 } else {
782 BitRangeOperator<bitvector_base<AllocatorT>, BitRangeAlignment::no_alignment> op(*this, pos, bits);
783 range_operation(
784 [&]<std::unsigned_integral BlockT>(BlockT block) -> BlockT {
785 if constexpr(std::same_as<in_t, BlockT>) {
786 return unwrap_strong_type(value);
787 } else {
788 // This should never be reached. BOTAN_ASSERT_UNREACHABLE()
789 // caused warning "unreachable code" on MSVC, though. You
790 // don't say!
791 //
792 // Returning the given block back, is the most reasonable
793 // thing to do in this case, though.
794 return block;
795 }
796 },
797 op);
798 }
799 }
800
801 /// @}
802
803 /// @name Operators
804 ///
805 /// @{
806
807 auto operator~() {
808 auto newbv = *this;
809 newbv.flip();
810 return newbv;
811 }
812
      /// In-place block-wise OR with @p other.
      template <bitvectorish OtherT>
      auto& operator|=(const OtherT& other) {
         full_range_operation([]<std::unsigned_integral BlockT>(BlockT lhs, BlockT rhs) -> BlockT { return lhs | rhs; },
                              *this,
                              unwrap_strong_type(other));
         return *this;
      }

      /// In-place block-wise AND with @p other.
      template <bitvectorish OtherT>
      auto& operator&=(const OtherT& other) {
         full_range_operation([]<std::unsigned_integral BlockT>(BlockT lhs, BlockT rhs) -> BlockT { return lhs & rhs; },
                              *this,
                              unwrap_strong_type(other));
         return *this;
      }

      /// In-place block-wise XOR with @p other.
      template <bitvectorish OtherT>
      auto& operator^=(const OtherT& other) {
         full_range_operation([]<std::unsigned_integral BlockT>(BlockT lhs, BlockT rhs) -> BlockT { return lhs ^ rhs; },
                              *this,
                              unwrap_strong_type(other));
         return *this;
      }
836
837 /// @}
838
839 /// @name Constant Time Operations
840 ///
841 /// @{
842
843 /**
844 * Implements::
845 *
846 * if(condition) {
847 * *this ^= other;
848 * }
849 *
850 * omitting runtime dependence on any of the parameters.
851 */
      template <bitvectorish OtherT>
      void ct_conditional_xor(CT::Choice condition, const OtherT& other) {
         BOTAN_ASSERT_NOMSG(m_bits == other.m_bits);
         BOTAN_ASSERT_NOMSG(m_blocks.size() == other.m_blocks.size());

         // One lambda per block width; each captures a constant-time mask
         // derived from @p condition, so the XOR is applied (or not) without
         // any branch depending on the condition or the data.
         auto maybe_xor = overloaded{
            [m = CT::Mask<uint64_t>::from_choice(condition)](uint64_t lhs, uint64_t rhs) -> uint64_t {
               return lhs ^ m.if_set_return(rhs);
            },
            [m = CT::Mask<uint32_t>::from_choice(condition)](uint32_t lhs, uint32_t rhs) -> uint32_t {
               return lhs ^ m.if_set_return(rhs);
            },
            [m = CT::Mask<uint16_t>::from_choice(condition)](uint16_t lhs, uint16_t rhs) -> uint16_t {
               return lhs ^ m.if_set_return(rhs);
            },
            [m = CT::Mask<uint8_t>::from_choice(condition)](uint8_t lhs, uint8_t rhs) -> uint8_t {
               return lhs ^ m.if_set_return(rhs);
            },
         };

         full_range_operation(maybe_xor, *this, unwrap_strong_type(other));
      }
874
875 constexpr void _const_time_poison() const { CT::poison(m_blocks); }
876
877 constexpr void _const_time_unpoison() const { CT::unpoison(m_blocks); }
878
879 /// @}
880
881 /// @name Iterators
882 ///
883 /// @{
884
885 iterator begin() noexcept { return iterator(this, 0); }
886
887 const_iterator begin() const noexcept { return const_iterator(this, 0); }
888
889 const_iterator cbegin() const noexcept { return const_iterator(this, 0); }
890
891 iterator end() noexcept { return iterator(this, size()); }
892
893 const_iterator end() const noexcept { return const_iterator(this, size()); }
894
895 const_iterator cend() noexcept { return const_iterator(this, size()); }
896
897 /// @}
898
899 private:
      // Throws Invalid_Argument if @p pos is not a valid bit index.
      void check_offset(size_type pos) const {
         // BOTAN_ASSERT_NOMSG(!CT::is_poisoned(&m_bits, sizeof(m_bits)));
         // BOTAN_ASSERT_NOMSG(!CT::is_poisoned(&pos, sizeof(pos)));
         BOTAN_ARG_CHECK(pos < m_bits, "Out of range");
      }

      // Clears storage bits at and beyond size() in the last block; they
      // exist in the allocation but are not part of the bitvector's value.
      void zero_unused_bits() {
         const auto first_unused_bit = size();

         // Zero out any unused bits in the last block
         if(first_unused_bit % block_size_bits != 0) {
            const block_type mask = (one << block_offset(first_unused_bit)) - one;
            m_blocks[block_index(first_unused_bit)] &= mask;
         }
      }

      // Number of blocks needed to hold @p bits bits (rounded up).
      static constexpr size_type ceil_toblocks(size_type bits) {
         return (bits + block_size_bits - 1) / block_size_bits;
      }

      // Unchecked proxy to the bit at @p pos (const and mutable flavors).
      auto ref(size_type pos) const { return bitref<const block_type>(m_blocks, pos); }

      auto ref(size_type pos) { return bitref<block_type>(m_blocks, pos); }
923
924 private:
925 enum class BitRangeAlignment { byte_aligned, no_alignment };
926
927 /**
928 * Helper construction to implement bit range operations on the bitvector.
929 * It basically implements an iterator to read and write blocks of bits
930 * from the underlying bitvector. Where "blocks of bits" are unsigned
931 * integers of varying bit lengths.
932 *
933 * If the iteration starts at a byte boundary in the underlying bitvector,
934 * this applies certain optimizations (i.e. loading blocks of bits straight
935 * from the underlying byte buffer). The optimizations are enabled at
936 * compile time (with the template parameter `alignment`).
937 */
938 template <typename BitvectorT, auto alignment = BitRangeAlignment::byte_aligned>
939 requires is_bitvector_v<std::remove_cvref_t<BitvectorT>>
940 class BitRangeOperator {
941 private:
942 constexpr static bool is_const() { return std::is_const_v<BitvectorT>; }
943
944 struct UnalignedDataHelper {
945 const uint8_t padding_bits;
946 const uint8_t bits_to_byte_alignment;
947 };
948
949 public:
            // Iterates [start_bitoffset, start_bitoffset + bitlength) of
            // @p source. The helper precomputes how far the start is from a
            // byte boundary, which drives the unaligned load/store paths.
            BitRangeOperator(BitvectorT& source, size_type start_bitoffset, size_type bitlength) :
                  m_source(source),
                  m_start_bitoffset(start_bitoffset),
                  m_bitlength(bitlength),
                  m_unaligned_helper({.padding_bits = static_cast<uint8_t>(start_bitoffset % 8),
                                      .bits_to_byte_alignment = static_cast<uint8_t>(8 - (start_bitoffset % 8))}),
                  m_read_bitpos(start_bitoffset),
                  m_write_bitpos(start_bitoffset) {
               BOTAN_ASSERT(is_byte_aligned() == (m_start_bitoffset % 8 == 0), "byte alignment guarantee");
               BOTAN_ASSERT(m_source.size() >= m_start_bitoffset + m_bitlength, "enough bytes in underlying source");
            }

            // Convenience: iterate the entire bitvector.
            BitRangeOperator(BitvectorT& source) : BitRangeOperator(source, 0, source.size()) {}
963
964 static constexpr bool is_byte_aligned() { return alignment == BitRangeAlignment::byte_aligned; }
965
966 /**
967 * @returns the overall number of bits to be iterated with this operator
968 */
969 size_type size() const { return m_bitlength; }
970
971 /**
972 * @returns the number of bits not yet read from this operator
973 */
974 size_type bits_to_read() const { return m_bitlength - m_read_bitpos + m_start_bitoffset; }
975
976 /**
977 * @returns the number of bits still to be written into this operator
978 */
979 size_type bits_to_write() const { return m_bitlength - m_write_bitpos + m_start_bitoffset; }
980
981 /**
982 * Loads the next block of bits from the underlying bitvector. No
983 * bounds checks are performed. The caller can define the size of
984 * the resulting unsigned integer block.
985 */
            template <std::unsigned_integral BlockT>
            BlockT load_next() const {
               constexpr size_type block_size = sizeof(BlockT);
               constexpr size_type block_bits = block_size * 8;
               const auto bits_remaining = bits_to_read();

               BlockT result_block = 0;
               if constexpr(is_byte_aligned()) {
                  // Fast path: read sizeof(BlockT) bytes little-endian.
                  result_block = load_le(m_source.as_byte_span().subspan(read_bytepos()).template first<block_size>());
               } else {
                  const size_type byte_pos = read_bytepos();
                  const size_type bits_to_collect = std::min(block_bits, bits_to_read());

                  const uint8_t first_byte = m_source.as_byte_span()[byte_pos];

                  // Initialize the left-most bits from the first byte.
                  result_block = BlockT(first_byte) >> m_unaligned_helper.padding_bits;

                  // If more bits are needed, we pull them from the remaining bytes.
                  if(m_unaligned_helper.bits_to_byte_alignment < bits_to_collect) {
                     const BlockT block =
                        load_le(m_source.as_byte_span().subspan(byte_pos + 1).template first<block_size>());
                     result_block |= block << m_unaligned_helper.bits_to_byte_alignment;
                  }
               }

               // NOTE(review): advances m_read_bitpos from a const method —
               // presumably declared mutable; confirm in the member
               // declarations (not visible in this view).
               m_read_bitpos += std::min(block_bits, bits_remaining);
               return result_block;
            }
1015
            /**
             * Stores the next block of bits into the underlying bitvector.
             * No bounds checks are performed. Storing bit blocks that are not
             * aligned at a byte-boundary in the underlying bitvector is
             * currently not implemented.
             *
             * Advances the write cursor by the number of bits stored (at most
             * the bit width of @p BlockT, never past the end of the range).
             */
            template <std::unsigned_integral BlockT>
               requires(!is_const())
            void store_next(BlockT block) {
               constexpr size_type block_size = sizeof(BlockT);  // in bytes
               constexpr size_type block_bits = block_size * 8;

               if constexpr(is_byte_aligned()) {
                  // Fast path: write the block as little-endian bytes straight
                  // into the underlying byte buffer.
                  auto sink = m_source.as_byte_span().subspan(write_bytepos()).template first<block_size>();
                  store_le(sink, block);
               } else {
                  const size_type byte_pos = write_bytepos();
                  const size_type bits_to_store = std::min(block_bits, bits_to_write());

                  uint8_t& first_byte = m_source.as_byte_span()[byte_pos];

                  // Set the left-most bits in the first byte, leaving all others unchanged
                  first_byte = (first_byte & uint8_t(0xFF >> m_unaligned_helper.bits_to_byte_alignment)) |
                               uint8_t(block << m_unaligned_helper.padding_bits);

                  // If more bits are provided, we store them in the remaining bytes.
                  if(m_unaligned_helper.bits_to_byte_alignment < bits_to_store) {
                     const auto remaining_bytes =
                        m_source.as_byte_span().subspan(byte_pos + 1).template first<block_size>();
                     // Preserve the existing bits this block does not overwrite
                     // (masked by padding_mask) and merge in the remainder of `block`.
                     const BlockT padding_mask = ~(BlockT(-1) >> m_unaligned_helper.bits_to_byte_alignment);
                     const BlockT new_bytes =
                        (load_le(remaining_bytes) & padding_mask) | block >> m_unaligned_helper.bits_to_byte_alignment;
                     store_le(remaining_bytes, new_bytes);
                  }
               }

               m_write_bitpos += std::min(block_bits, bits_to_write());
            }
1054
            /**
             * @returns a mutable view over @p blocks elements of type @p BlockT
             *          starting at the current read position
             *
             * Only available for byte-aligned, non-const ranges; the debug
             * asserts check that the read position is suitably aligned for
             * a BlockT access.
             */
            template <std::unsigned_integral BlockT>
               requires(is_byte_aligned() && !is_const())
            std::span<BlockT> span(size_type blocks) const {
               BOTAN_DEBUG_ASSERT(blocks == 0 || is_memory_aligned_to<BlockT>());
               BOTAN_DEBUG_ASSERT(read_bytepos() % sizeof(BlockT) == 0);
               // Intermittently casting to void* to avoid a compiler warning
               void* ptr = reinterpret_cast<void*>(m_source.as_byte_span().data() + read_bytepos());
               return {reinterpret_cast<BlockT*>(ptr), blocks};
            }
1064
            /**
             * @returns a read-only view over @p blocks elements of type
             *          @p BlockT starting at the current read position
             *
             * Const-range counterpart of the mutable span() overload above.
             */
            template <std::unsigned_integral BlockT>
               requires(is_byte_aligned() && is_const())
            std::span<const BlockT> span(size_type blocks) const {
               BOTAN_DEBUG_ASSERT(blocks == 0 || is_memory_aligned_to<BlockT>());
               BOTAN_DEBUG_ASSERT(read_bytepos() % sizeof(BlockT) == 0);
               // Intermittently casting to void* to avoid a compiler warning
               const void* ptr = reinterpret_cast<const void*>(m_source.as_byte_span().data() + read_bytepos());
               return {reinterpret_cast<const BlockT*>(ptr), blocks};
            }
1074
1075 void advance(size_type bytes)
1076 requires(is_byte_aligned())
1077 {
1078 m_read_bitpos += bytes * 8;
1079 m_write_bitpos += bytes * 8;
1080 }
1081
1082 template <std::unsigned_integral BlockT>
1083 requires(is_byte_aligned())
1084 size_t is_memory_aligned_to() const {
1085 const void* cptr = m_source.as_byte_span().data() + read_bytepos();
1086 const void* ptr_before = cptr;
1087
1088 // std::align takes `ptr` as a reference (!), i.e. `void*&` and
1089 // uses it as an out-param. Though, `cptr` is const because this
1090 // method is const-qualified, hence the const_cast<>.
1091 void* ptr = const_cast<void*>(cptr);
1092 size_t size = sizeof(BlockT);
1093 return ptr_before != nullptr && std::align(alignof(BlockT), size, ptr, size) == ptr_before;
1094 }
1095
         private:
            /// @returns the byte offset of the current read position
            size_type read_bytepos() const { return m_read_bitpos / 8; }

            /// @returns the byte offset of the current write position
            size_type write_bytepos() const { return m_write_bitpos / 8; }

         private:
            BitvectorT& m_source;         ///< the bitvector this operator works on
            size_type m_start_bitoffset;  ///< absolute bit offset of the range's first bit
            size_type m_bitlength;        ///< number of bits covered by this range

            /// precomputed shift amounts for unaligned ranges
            /// (padding_bits / bits_to_byte_alignment; defined further up)
            UnalignedDataHelper m_unaligned_helper;

            // mutable: load_next() advances the read cursor from const methods
            mutable size_type m_read_bitpos;   ///< absolute bit position of the read cursor
            mutable size_type m_write_bitpos;  ///< absolute bit position of the write cursor
      };
1111
   /**
    * Helper struct for the low-level handling of blockwise operations
    *
    * This has two main code paths: Optimized for byte-aligned ranges that
    * can simply be taken from memory as-is. And a generic implementation
    * that must assemble blocks from unaligned bits before processing.
    */
   template <typename FnT, typename... ParamTs>
      requires detail::blockwise_processing_callback<FnT, ParamTs...>
   class blockwise_processing_callback_trait {
      public:
         // Classification of the callback (detected via the detail:: concepts):
         //   needs_mask:     @p fn takes an extra bit mask marking the valid bits
         //   is_manipulator: @p fn returns a block written back to the first range
         //   is_predicate:   @p fn returns bool; `false` aborts the operation
         constexpr static bool needs_mask = detail::blockwise_processing_callback_with_mask<FnT, ParamTs...>;
         constexpr static bool is_manipulator = detail::manipulating_blockwise_processing_callback<FnT, ParamTs...>;
         constexpr static bool is_predicate = detail::predicate_blockwise_processing_callback<FnT, ParamTs...>;
         static_assert(!is_manipulator || !is_predicate, "cannot be manipulator and predicate at the same time");

         /**
          * Applies @p fn to the blocks provided in @p blocks by simply reading from
          * memory without re-arranging any bits across byte-boundaries.
          *
          * @returns false iff a predicate callback returned false (early exit)
          */
         template <std::unsigned_integral... BlockTs>
            requires(all_same_v<std::remove_cv_t<BlockTs>...> && sizeof...(BlockTs) == sizeof...(ParamTs))
         constexpr static bool apply_on_full_blocks(FnT fn, std::span<BlockTs>... blocks) {
            constexpr size_type bits = sizeof(detail::first_t<BlockTs...>) * 8;
            const size_type iterations = detail::first(blocks...).size();
            for(size_type i = 0; i < iterations; ++i) {
               if constexpr(is_predicate) {
                  if(!apply(fn, bits, blocks[i]...)) {
                     return false;
                  }
               } else if constexpr(is_manipulator) {
                  // a manipulator's result is stored back into the first range
                  detail::first(blocks...)[i] = apply(fn, bits, blocks[i]...);
               } else {
                  apply(fn, bits, blocks[i]...);
               }
            }
            return true;
         }

         /**
          * Applies @p fn to as many blocks as @p ops provide for the given type.
          *
          * @returns false iff a predicate callback returned false (early exit)
          */
         template <std::unsigned_integral BlockT, typename... BitRangeOperatorTs>
            requires(sizeof...(BitRangeOperatorTs) == sizeof...(ParamTs))
         constexpr static bool apply_on_unaligned_blocks(FnT fn, BitRangeOperatorTs&... ops) {
            constexpr size_type block_bits = sizeof(BlockT) * 8;
            auto bits = detail::first(ops...).bits_to_read();
            if(bits == 0) {
               return true;
            }

            bits += block_bits;  // avoid unsigned integer underflow in the following loop
            // Loop while more than (block_bits - 8) actual bits remain; any
            // shorter tail is left for an invocation with a smaller BlockT
            // (range_operation chains 64/32/16/8-bit block sizes).
            while(bits > block_bits * 2 - 8) {
               bits -= block_bits;
               if constexpr(is_predicate) {
                  if(!apply(fn, bits, ops.template load_next<BlockT>()...)) {
                     return false;
                  }
               } else if constexpr(is_manipulator) {
                  detail::first(ops...).store_next(apply(fn, bits, ops.template load_next<BlockT>()...));
               } else {
                  apply(fn, bits, ops.template load_next<BlockT>()...);
               }
            }
            return true;
         }

      private:
         /// Invokes @p fn on the blocks, appending a validity bit mask if required.
         template <std::unsigned_integral... BlockTs>
            requires(all_same_v<BlockTs...>)
         constexpr static auto apply(FnT fn, size_type bits, BlockTs... blocks) {
            if constexpr(needs_mask) {
               return fn(blocks..., make_mask<detail::first_t<BlockTs...>>(bits));
            } else {
               return fn(blocks...);
            }
         }
   };
1190
   /**
    * Helper function of `full_range_operation` and `range_operation` that
    * calls @p fn on a given aligned unsigned integer block as long as the
    * underlying bit range contains enough bits to fill the block fully.
    *
    * This uses bare memory access to gain a speed up for aligned data.
    *
    * @returns false iff a predicate callback aborted the operation
    */
   template <std::unsigned_integral BlockT, typename FnT, typename... BitRangeOperatorTs>
      requires(detail::blockwise_processing_callback<FnT, BitRangeOperatorTs...> &&
               sizeof...(BitRangeOperatorTs) > 0)
   static bool _process_in_fully_aligned_blocks_of(FnT fn, BitRangeOperatorTs&... ops) {
      constexpr size_type block_bytes = sizeof(BlockT);
      constexpr size_type block_bits = block_bytes * 8;
      // number of full BlockT blocks covered by the (equal-length) ranges
      const size_type blocks = detail::first(ops...).bits_to_read() / block_bits;

      using callback_trait = blockwise_processing_callback_trait<FnT, BitRangeOperatorTs...>;
      const auto result = callback_trait::apply_on_full_blocks(fn, ops.template span<BlockT>(blocks)...);
      // move every operator past the bytes that were just processed
      (ops.advance(block_bytes * blocks), ...);
      return result;
   }
1211
   /**
    * Helper function of `full_range_operation` and `range_operation` that
    * calls @p fn on a given unsigned integer block size as long as the
    * underlying bit range contains enough bits to fill the block.
    *
    * @returns false iff a predicate callback aborted the operation
    */
   template <std::unsigned_integral BlockT, typename FnT, typename... BitRangeOperatorTs>
      requires(detail::blockwise_processing_callback<FnT, BitRangeOperatorTs...>)
   static bool _process_in_unaligned_blocks_of(FnT fn, BitRangeOperatorTs&... ops) {
      using callback_trait = blockwise_processing_callback_trait<FnT, BitRangeOperatorTs...>;
      return callback_trait::template apply_on_unaligned_blocks<BlockT>(fn, ops...);
   }
1223
   /**
    * Apply @p fn to all bits in the ranges defined by @p ops. If more than
    * one range operator is passed to @p ops, @p fn receives corresponding
    * blocks of bits from each operator. Therefore, all @p ops have to define
    * the exact same length of their underlying ranges.
    *
    * @p fn may return a bit block that will be stored into the _first_ bit
    * range passed into @p ops. If @p fn returns a boolean, and its value is
    * `false`, the range operation is cancelled and `false` is returned.
    *
    * The implementation ensures to pull bits in the largest bit blocks
    * possible and reverts to smaller bit blocks only when needed.
    */
   template <typename FnT, typename... BitRangeOperatorTs>
      requires(detail::blockwise_processing_callback<FnT, BitRangeOperatorTs...> &&
               sizeof...(BitRangeOperatorTs) > 0)
   static bool range_operation(FnT fn, BitRangeOperatorTs... ops) {
      BOTAN_ASSERT(has_equal_lengths(ops...), "all BitRangeOperators have the same length");

      if constexpr((BitRangeOperatorTs::is_byte_aligned() && ...)) {
         // Note: At the moment we can assume that this will always be used
         //       on the _entire_ bitvector. Therefore, we can safely assume
         //       that the bitvectors' underlying buffers are properly aligned.
         //       If this assumption changes, we need to add further handling
         //       to process a byte padding at the beginning of the bitvector
         //       until a memory alignment boundary is reached.
         const bool alignment = (ops.template is_memory_aligned_to<uint64_t>() && ...);
         BOTAN_ASSERT_NOMSG(alignment);

         // Consume the ranges in progressively smaller block sizes; each
         // helper processes as many full blocks as it can, the final 8-bit
         // pass handles the remaining tail.
         return _process_in_fully_aligned_blocks_of<uint64_t>(fn, ops...) &&
                _process_in_fully_aligned_blocks_of<uint32_t>(fn, ops...) &&
                _process_in_fully_aligned_blocks_of<uint16_t>(fn, ops...) &&
                _process_in_unaligned_blocks_of<uint8_t>(fn, ops...);
      } else {
         // Unaligned ranges: blocks must be assembled bit-by-byte, again in
         // progressively smaller block sizes.
         return _process_in_unaligned_blocks_of<uint64_t>(fn, ops...) &&
                _process_in_unaligned_blocks_of<uint32_t>(fn, ops...) &&
                _process_in_unaligned_blocks_of<uint16_t>(fn, ops...) &&
                _process_in_unaligned_blocks_of<uint8_t>(fn, ops...);
      }
   }
1264
   /**
    * Apply @p fn to all bit blocks in the bitvector(s).
    *
    * Wraps each bitvector into a BitRangeOperator covering its full length
    * and delegates to range_operation(). All bitvectors must have the same
    * length.
    */
   template <typename FnT, typename... BitvectorTs>
      requires(detail::blockwise_processing_callback<FnT, BitvectorTs...> &&
               (is_bitvector_v<std::remove_cvref_t<BitvectorTs>> && ... && true))
   static bool full_range_operation(FnT&& fn, BitvectorTs&... bitvecs) {
      BOTAN_ASSERT(has_equal_lengths(bitvecs...), "all bitvectors have the same length");
      return range_operation(std::forward<FnT>(fn), BitRangeOperator<BitvectorTs>(bitvecs)...);
   }
1275
1276 template <typename SomeT, typename... SomeTs>
1277 static bool has_equal_lengths(const SomeT& v, const SomeTs&... vs) {
1278 return ((v.size() == vs.size()) && ... && true);
1279 }
1280
   /**
    * Creates a mask with the @p bits least significant bits set, without any
    * data-dependent branches:
    *
    *  - bits >= bit width of T: `max` is true, so `bits` is cleared to 0 and
    *    `(T(0) << 0) - 1` wraps around to all-ones
    *  - otherwise:              `bits` is left unchanged (AND with all-ones)
    *    and the result is `(T(1) << bits) - 1`
    */
   template <std::unsigned_integral T>
   static constexpr T make_mask(size_type bits) {
      const bool max = bits >= sizeof(T) * 8;
      bits &= T(max - 1);  // max ? 0 : all-ones
      return (T(!max) << bits) - 1;
   }
1287
1288 auto as_byte_span() { return std::span{m_blocks.data(), m_blocks.size() * sizeof(block_type)}; }
1289
1290 auto as_byte_span() const { return std::span{m_blocks.data(), m_blocks.size() * sizeof(block_type)}; }
1291
   private:
      size_type m_bits;                                  ///< logical length of the bitvector in bits
      std::vector<block_type, allocator_type> m_blocks;  ///< packed bit storage, block_size_bits bits per block
};
1296
1299
1300namespace detail {
1301
1302/**
1303 * If one of the allocators is a Botan::secure_allocator, this will always
1304 * prefer it. Otherwise, the allocator of @p lhs will be used as a default.
1305 */
1306template <bitvectorish T1, bitvectorish T2>
1307constexpr auto copy_lhs_allocator_aware(const T1& lhs, const T2&) {
1308 constexpr bool needs_secure_allocator =
1310
1311 if constexpr(needs_secure_allocator) {
1312 return lhs.template as<secure_bitvector>();
1313 } else {
1314 return lhs.template as<bitvector>();
1315 }
1316}
1317
1318} // namespace detail
1319
1320template <bitvectorish T1, bitvectorish T2>
1321auto operator|(const T1& lhs, const T2& rhs) {
1322 auto res = detail::copy_lhs_allocator_aware(lhs, rhs);
1323 res |= rhs;
1324 return res;
1325}
1326
1327template <bitvectorish T1, bitvectorish T2>
1328auto operator&(const T1& lhs, const T2& rhs) {
1329 auto res = detail::copy_lhs_allocator_aware(lhs, rhs);
1330 res &= rhs;
1331 return res;
1332}
1333
1334template <bitvectorish T1, bitvectorish T2>
1335auto operator^(const T1& lhs, const T2& rhs) {
1336 auto res = detail::copy_lhs_allocator_aware(lhs, rhs);
1337 res ^= rhs;
1338 return res;
1339}
1340
/**
 * Compares two bitvector(ish) objects for equality.
 * Note: deliberately NOT constant time (see equals_vartime()).
 */
template <bitvectorish T1, bitvectorish T2>
bool operator==(const T1& lhs, const T2& rhs) {
   return lhs.equals_vartime(rhs);
}
1345
1346template <bitvectorish T1, bitvectorish T2>
1347bool operator!=(const T1& lhs, const T2& rhs) {
1348 return lhs.equals_vartime(rhs);
1349}
1350
1351namespace detail {
1352
/**
 * A Strong<> adapter for arbitrarily large bitvectors
 *
 * Every method is a thin forwarder to the wrapped bitvector obtained via
 * this->get(); see bitvector_base for the actual semantics.
 */
template <concepts::container T>
   requires is_bitvector_v<T>
class Strong_Adapter<T> : public Container_Strong_Adapter_Base<T> {
   public:
      using size_type = typename T::size_type;

   public:
      using Container_Strong_Adapter_Base<T>::Container_Strong_Adapter_Base;

      // -- single-bit access and manipulation --

      auto at(size_type i) const { return this->get().at(i); }

      auto at(size_type i) { return this->get().at(i); }

      auto set(size_type i) { return this->get().set(i); }

      auto unset(size_type i) { return this->get().unset(i); }

      auto flip(size_type i) { return this->get().flip(i); }

      auto flip() { return this->get().flip(); }

      // -- conversions and subvector handling --

      template <typename OutT>
      auto as() const {
         return this->get().template as<OutT>();
      }

      template <bitvectorish OutT = T>
      auto subvector(size_type pos, std::optional<size_type> length = std::nullopt) const {
         return this->get().template subvector<OutT>(pos, length);
      }

      // extracts a fixed-width unsigned integer (bool explicitly excluded)
      template <typename OutT>
         requires(std::unsigned_integral<strong_type_wrapped_type<OutT>> &&
                  !std::same_as<bool, strong_type_wrapped_type<OutT>>)
      auto subvector(size_type pos) const {
         return this->get().template subvector<OutT>(pos);
      }

      template <typename InT>
         requires(std::unsigned_integral<strong_type_wrapped_type<InT>> && !std::same_as<bool, InT>)
      void subvector_replace(size_type pos, InT value) {
         return this->get().subvector_replace(pos, value);
      }

      template <bitvectorish OtherT>
      auto equals(const OtherT& other) const {
         return this->get().equals(other);
      }

      // -- stack-like modifiers and element access --

      auto push_back(bool b) { return this->get().push_back(b); }

      auto pop_back() { return this->get().pop_back(); }

      auto front() const { return this->get().front(); }

      auto front() { return this->get().front(); }

      auto back() const { return this->get().back(); }

      auto back() { return this->get().back(); }

      // -- queries (the "_vartime" suffix marks non-constant-time code) --

      auto any_vartime() const { return this->get().any_vartime(); }

      auto all_vartime() const { return this->get().all_vartime(); }

      auto none_vartime() const { return this->get().none_vartime(); }

      auto has_odd_hamming_weight() const { return this->get().has_odd_hamming_weight(); }

      auto hamming_weight() const { return this->get().hamming_weight(); }

      // -- (de)serialization --

      auto from_bytes(std::span<const uint8_t> bytes, std::optional<size_type> bits = std::nullopt) {
         return this->get().from_bytes(bytes, bits);
      }

      template <typename OutT = T>
      auto to_bytes() const {
         return this->get().template to_bytes<OutT>();
      }

      auto to_bytes(std::span<uint8_t> out) const { return this->get().to_bytes(out); }

      auto to_string() const { return this->get().to_string(); }

      // -- capacity --

      auto capacity() const { return this->get().capacity(); }

      auto reserve(size_type n) { return this->get().reserve(n); }

      // -- constant-time annotation hooks (see CT::poison/unpoison) --

      constexpr void _const_time_poison() const { this->get()._const_time_poison(); }

      constexpr void _const_time_unpoison() const { this->get()._const_time_unpoison(); }
};
1448
1449} // namespace detail
1450
1451} // namespace Botan
1452
1453#endif
#define BOTAN_ASSERT_NOMSG(expr)
Definition assert.h:59
#define BOTAN_DEBUG_ASSERT(expr)
Definition assert.h:98
#define BOTAN_ARG_CHECK(expr, msg)
Definition assert.h:29
#define BOTAN_ASSERT(expr, assertion_made)
Definition assert.h:50
static constexpr Choice from_int(T v)
Definition ct_utils.h:311
static constexpr Mask< T > expand(T v)
Definition ct_utils.h:408
static constexpr Mask< T > from_choice(Choice c)
Definition ct_utils.h:413
bitref(bitref &&) noexcept=default
constexpr bitref & operator&=(bool other) noexcept
Definition bitvector.h:356
constexpr bitref & operator=(bitref &&bit) noexcept
Definition bitvector.h:352
constexpr bitref & flip() noexcept
Definition bitvector.h:337
constexpr bitref & operator=(bool bit) noexcept
Definition bitvector.h:344
constexpr bitref & operator=(const bitref &bit) noexcept
Definition bitvector.h:350
constexpr bitref & operator^=(bool other) noexcept
Definition bitvector.h:366
constexpr bitref & unset() noexcept
Definition bitvector.h:332
bitref(const bitref &) noexcept=default
constexpr bitref & operator|=(bool other) noexcept
Definition bitvector.h:361
const_iterator begin() const noexcept
Definition bitvector.h:887
bitvector_base & flip()
Definition bitvector.h:641
bitvector_base & flip(size_type pos)
Definition bitvector.h:632
bitvector_base & unset()
Definition bitvector.h:621
void reserve(size_type bits)
Definition bitvector.h:537
bitvector_base & unset(size_type pos)
Definition bitvector.h:612
bool none() const
Definition bitvector.h:657
auto at(size_type pos) const
Definition bitvector.h:572
auto operator[](size_type pos) const
Definition bitvector.h:685
CT::Choice has_odd_hamming_weight() const
Definition bitvector.h:404
void subvector_replace(size_type pos, InT value)
Definition bitvector.h:773
constexpr void _const_time_unpoison() const
Definition bitvector.h:877
std::string to_string() const
Definition bitvector.h:522
bitvector_base & set(size_type pos)
Definition bitvector.h:591
static constexpr size_type block_size_bytes
Definition bitvector.h:240
void from_bytes(std::span< const uint8_t > bytes, std::optional< size_type > bits=std::nullopt)
Definition bitvector.h:463
bitvector_base(size_type bits)
Definition bitvector.h:375
size_type hamming_weight() const
Definition bitvector.h:419
auto & operator&=(const OtherT &other)
Definition bitvector.h:822
block_type value_type
Definition bitvector.h:236
const_iterator cbegin() const noexcept
Definition bitvector.h:889
bitvector_base & set()
Definition bitvector.h:600
static constexpr bool uses_secure_allocator
Definition bitvector.h:242
detail::bitvector_iterator< bitvector_base< AllocatorT > > iterator
Definition bitvector.h:237
void ct_conditional_xor(CT::Choice condition, const OtherT &other)
Definition bitvector.h:853
bool none_vartime() const
Definition bitvector.h:650
AllocatorT< block_type > allocator_type
Definition bitvector.h:235
auto operator[](size_type pos)
Definition bitvector.h:682
bool all_vartime() const
Definition bitvector.h:672
void to_bytes(std::span< uint8_t > out) const
Definition bitvector.h:500
size_type capacity() const
Definition bitvector.h:535
friend class bitvector_base
Definition bitvector.h:246
auto & operator|=(const OtherT &other)
Definition bitvector.h:814
auto subvector(size_type pos, std::optional< size_type > length=std::nullopt) const
Definition bitvector.h:697
auto at(size_type pos)
Definition bitvector.h:566
bool empty() const
Definition bitvector.h:397
static constexpr size_type block_size_bits
Definition bitvector.h:241
size_type size() const
Definition bitvector.h:399
iterator begin() noexcept
Definition bitvector.h:885
detail::bitvector_iterator< const bitvector_base< AllocatorT > > const_iterator
Definition bitvector.h:238
void resize(size_type bits)
Definition bitvector.h:539
const_iterator end() const noexcept
Definition bitvector.h:893
bitvector_base(std::span< const uint8_t > bytes, std::optional< size_type > bits=std::nullopt)
Definition bitvector.h:390
bool any_vartime() const
Definition bitvector.h:662
void push_back(bool bit)
Definition bitvector.h:549
auto back() const
Definition bitvector.h:585
iterator end() noexcept
Definition bitvector.h:891
constexpr void _const_time_poison() const
Definition bitvector.h:875
OutT to_bytes() const
Definition bitvector.h:489
OutT subvector(size_type pos) const
Definition bitvector.h:737
auto & operator^=(const OtherT &other)
Definition bitvector.h:830
bool equals(const OtherT &other) const noexcept
Definition bitvector.h:448
auto front() const
Definition bitvector.h:580
const_iterator cend() noexcept
Definition bitvector.h:895
bitvector_base(std::initializer_list< block_type > blocks, std::optional< size_type > bits=std::nullopt)
Definition bitvector.h:394
bool equals_vartime(const OtherT &other) const noexcept
Definition bitvector.h:437
typename T::size_type size_type
Definition bitvector.h:1360
auto equals(const OtherT &other) const
Definition bitvector.h:1401
constexpr void _const_time_poison() const
Definition bitvector.h:1444
void subvector_replace(size_type pos, InT value)
Definition bitvector.h:1396
auto subvector(size_type pos) const
Definition bitvector.h:1390
auto from_bytes(std::span< const uint8_t > bytes, std::optional< size_type > bits=std::nullopt)
Definition bitvector.h:1427
auto subvector(size_type pos, std::optional< size_type > length=std::nullopt) const
Definition bitvector.h:1383
auto at(size_type i) const
Definition bitvector.h:1365
auto to_bytes(std::span< uint8_t > out) const
Definition bitvector.h:1436
constexpr void _const_time_unpoison() const
Definition bitvector.h:1446
constexpr T & get() &
Definition strong_type.h:50
std::bidirectional_iterator_tag iterator_category
Definition bitvector.h:133
bitvector_iterator & operator=(bitvector_iterator &&other) noexcept
Definition bitvector.h:157
std::remove_const_t< decltype(std::declval< T >().at(0))> value_type
Definition bitvector.h:128
std::partial_ordering operator<=>(const bitvector_iterator &other) const noexcept
Definition bitvector.h:185
bitvector_iterator & operator=(const bitvector_iterator &other) noexcept
Definition bitvector.h:149
bool operator==(const bitvector_iterator &other) const noexcept
Definition bitvector.h:193
bitvector_iterator operator--(int) noexcept
Definition bitvector.h:179
bitvector_iterator & operator++() noexcept
Definition bitvector.h:163
bitvector_iterator & operator--() noexcept
Definition bitvector.h:174
bitvector_iterator(bitvector_iterator &&other) noexcept
Definition bitvector.h:145
bitvector_iterator(T *bitvector, size_t offset)
Definition bitvector.h:139
std::make_signed_t< size_type > difference_type
Definition bitvector.h:127
bitvector_iterator operator++(int) noexcept
Definition bitvector.h:168
bitvector_iterator(const bitvector_iterator &other) noexcept
Definition bitvector.h:141
int(* update)(CTX *, const void *, CC_LONG len)
int(* final)(unsigned char *, CTX *)
FE_25519 T
Definition ge.cpp:34
constexpr void unpoison(const T *p, size_t n)
Definition ct_utils.h:64
constexpr void poison(const T *p, size_t n)
Definition ct_utils.h:53
std::invoke_result_t< FnT, as< BlockT, ParamTs >... > blockwise_processing_callback_return_type
Definition bitvector.h:76
constexpr auto copy_lhs_allocator_aware(const T1 &lhs, const T2 &)
Definition bitvector.h:1307
typename first_type< Ts... >::type first_t
Definition bitvector.h:62
ASN1_Type operator|(ASN1_Type x, ASN1_Type y)
Definition asn1_obj.h:75
OctetString operator^(const OctetString &k1, const OctetString &k2)
Definition symkey.cpp:109
constexpr void typecast_copy(ToR &&out, FromR &&in)
Definition mem_ops.h:179
bool operator!=(const AlgorithmIdentifier &a1, const AlgorithmIdentifier &a2)
Definition alg_id.cpp:69
constexpr decltype(auto) unwrap_strong_type(T &&t)
Generically unwraps a strong type to its underlying type.
constexpr auto store_le(ParamTs &&... params)
Definition loadstor.h:764
typename detail::wrapped_type_helper< std::remove_cvref_t< T > >::type strong_type_wrapped_type
Extracts the wrapped type from a strong type.
bool operator==(const AlgorithmIdentifier &a1, const AlgorithmIdentifier &a2)
Definition alg_id.cpp:54
constexpr decltype(auto) wrap_strong_type(ParamT &&t)
Wraps a value into a caller-defined (strong) type.
constexpr auto load_le(ParamTs &&... params)
Definition loadstor.h:521
const SIMD_8x32 & b
constexpr T ceil_tobytes(T bits)
Definition bit_ops.h:168
constexpr auto bitlen(size_t x)
constexpr void copy_mem(T *out, const T *in, size_t n)
Definition mem_ops.h:147
constexpr void clear_mem(T *ptr, size_t n)
Definition mem_ops.h:121
constexpr uint8_t ceil_log2(T x)
Definition bit_ops.h:133
ECIES_Flags operator&(ECIES_Flags a, ECIES_Flags b)
Definition ecies.h:55
constexpr uint8_t ct_popcount(T x)
Definition bit_ops.h:261