Botan 3.5.0
Crypto and TLS for C++
alignment_buffer.h
Go to the documentation of this file.
1/*
2 * Alignment buffer helper
3 * (C) 2023 Jack Lloyd
4 * 2023 René Meusel - Rohde & Schwarz Cybersecurity
5 *
6 * Botan is released under the Simplified BSD License (see license.txt)
7 */
8
9#ifndef BOTAN_ALIGNMENT_BUFFER_H_
10#define BOTAN_ALIGNMENT_BUFFER_H_
11
12#include <botan/concepts.h>
13#include <botan/mem_ops.h>
14#include <botan/internal/stl_util.h>
15
16#include <array>
17#include <optional>
18#include <span>
19
20namespace Botan {
21
22/**
23 * Defines the strategy for handling the final block of input data in the
24 * handle_unaligned_data() method of the AlignmentBuffer<>.
25 *
26 * - is_not_special: the final block is treated like any other block
27 * - must_be_deferred: the final block is not emitted while bulk processing (typically add_data())
28 * but is deferred until manually consumed (typically final_result())
29 *
30 * The AlignmentBuffer<> assumes data to be "the final block" if no further
31 * input data is available in the BufferSlicer<>. This might result in some
32 * performance overhead when using the must_be_deferred strategy.
33 */
enum class AlignmentBufferFinalBlock : size_t {
   // The final block is emitted like any other full block.
   is_not_special = 0,
   // The final block is withheld during bulk processing and must be
   // consumed explicitly (see AlignmentBuffer::consume()).
   must_be_deferred = 1,
};
38
39/**
40 * @brief Alignment buffer helper
41 *
42 * Many algorithms have an intrinsic block size in which they consume input
43 * data. When streaming arbitrary data chunks to such algorithms we must store
44 * some data intermittently to honor the algorithm's alignment requirements.
45 *
46 * This helper encapsulates such an alignment buffer. The API of this class is
47 * designed to minimize user errors in the algorithm implementations. Therefore,
48 * it is strongly opinionated on its use case. Don't try to use it for anything
49 * but the described circumstance.
50 *
51 * @tparam T the element type of the internal buffer
52 * @tparam BLOCK_SIZE the buffer size to use for the alignment buffer
53 * @tparam FINAL_BLOCK_STRATEGY defines whether the final input data block is
54 * retained in handle_unaligned_data() and must be
55 * manually consumed
56 */
57template <typename T,
58 size_t BLOCK_SIZE,
60 requires(BLOCK_SIZE > 0)
62 public:
63 AlignmentBuffer() : m_position(0) {}
64
65 ~AlignmentBuffer() { secure_scrub_memory(m_buffer.data(), m_buffer.size()); }
66
67 AlignmentBuffer(const AlignmentBuffer& other) = default;
68 AlignmentBuffer(AlignmentBuffer&& other) noexcept = default;
69 AlignmentBuffer& operator=(const AlignmentBuffer& other) = default;
70 AlignmentBuffer& operator=(AlignmentBuffer&& other) noexcept = default;
71
72 void clear() {
73 clear_mem(m_buffer.data(), m_buffer.size());
74 m_position = 0;
75 }
76
77 /**
78 * Fills the currently unused bytes of the buffer with zero bytes
79 */
81 if(!ready_to_consume()) {
82 clear_mem(&m_buffer[m_position], elements_until_alignment());
83 m_position = m_buffer.size();
84 }
85 }
86
87 /**
88 * Appends the provided @p elements to the buffer. The user has to make
89 * sure that @p elements fits in the remaining capacity of the buffer.
90 */
91 void append(std::span<const T> elements) {
92 BOTAN_ASSERT_NOMSG(elements.size() <= elements_until_alignment());
93 std::copy(elements.begin(), elements.end(), m_buffer.begin() + m_position);
94 m_position += elements.size();
95 }
96
97 /**
98 * Allows direct modification of the first @p elements in the buffer.
99 * This is a low-level accessor that neither takes the buffer's current
100 * capacity into account nor does it change the internal cursor.
101 * Beware not to overwrite unconsumed bytes.
102 */
103 std::span<T> directly_modify_first(size_t elements) {
104 BOTAN_ASSERT_NOMSG(size() >= elements);
105 return std::span(m_buffer).first(elements);
106 }
107
108 /**
109 * Allows direct modification of the last @p elements in the buffer.
110 * This is a low-level accessor that neither takes the buffer's current
111 * capacity into account nor does it change the internal cursor.
112 * Beware not to overwrite unconsumed bytes.
113 */
114 std::span<T> directly_modify_last(size_t elements) {
115 BOTAN_ASSERT_NOMSG(size() >= elements);
116 return std::span(m_buffer).last(elements);
117 }
118
119 /**
120 * Once the buffer reached alignment, this can be used to consume as many
121 * input bytes from the given @p slider as possible. The output always
122 * contains data elements that are a multiple of the intrinsic block size.
123 *
124 * @returns a view onto the aligned data from @p slicer and the number of
125 * full blocks that are represented by this view.
126 */
127 [[nodiscard]] std::tuple<std::span<const uint8_t>, size_t> aligned_data_to_process(BufferSlicer& slicer) const {
128 BOTAN_ASSERT_NOMSG(in_alignment());
129
130 // When the final block is to be deferred, the last block must not be
131 // selected for processing if there is no (unaligned) extra input data.
132 const size_t defer = (defers_final_block()) ? 1 : 0;
133 const size_t full_blocks_to_process = (slicer.remaining() - defer) / m_buffer.size();
134 return {slicer.take(full_blocks_to_process * m_buffer.size()), full_blocks_to_process};
135 }
136
137 /**
138 * Once the buffer reached alignment, this can be used to consume full
139 * blocks from the input data represented by @p slicer.
140 *
141 * @returns a view onto the next full block from @p slicer or std::nullopt
142 * if not enough data is available in @p slicer.
143 */
144 [[nodiscard]] std::optional<std::span<const uint8_t>> next_aligned_block_to_process(BufferSlicer& slicer) const {
145 BOTAN_ASSERT_NOMSG(in_alignment());
146
147 // When the final block is to be deferred, the last block must not be
148 // selected for processing if there is no (unaligned) extra input data.
149 const size_t defer = (defers_final_block()) ? 1 : 0;
150 if(slicer.remaining() < m_buffer.size() + defer) {
151 return std::nullopt;
152 }
153
154 return slicer.take(m_buffer.size());
155 }
156
157 /**
158 * Intermittently buffers potentially unaligned data provided in @p
159 * slicer. If the internal buffer already contains some elements, data is
160 * appended. Once a full block is collected, it is returned to the caller
161 * for processing.
162 *
163 * @param slicer the input data source to be (partially) consumed
164 * @returns a view onto a full block once enough data was collected, or
165 * std::nullopt if no full block is available yet
166 */
167 [[nodiscard]] std::optional<std::span<const T>> handle_unaligned_data(BufferSlicer& slicer) {
168 // When the final block is to be deferred, we would need to store and
169 // hold a buffer that contains exactly one block until more data is
170 // passed or it is explicitly consumed.
171 const size_t defer = (defers_final_block()) ? 1 : 0;
172
173 if(in_alignment() && slicer.remaining() >= m_buffer.size() + defer) {
174 // We are currently in alignment and the passed-in data source
175 // contains enough data to benefit from aligned processing.
176 // Therefore, we don't copy anything into the intermittent buffer.
177 return std::nullopt;
178 }
179
180 // Fill the buffer with as much input data as needed to reach alignment
181 // or until the input source is depleted.
182 const auto elements_to_consume = std::min(m_buffer.size() - m_position, slicer.remaining());
183 append(slicer.take(elements_to_consume));
184
185 // If we collected enough data, we push out one full block. When
186 // deferring the final block is enabled, we additionally check that
187 // more input data is available to continue processing a consecutive
188 // block.
189 if(ready_to_consume() && (!defers_final_block() || !slicer.empty())) {
190 return consume();
191 } else {
192 return std::nullopt;
193 }
194 }
195
196 /**
197 * Explicitly consume the currently collected block. It is the caller's
198 * responsibility to ensure that the buffer is filled fully. After
199 * consumption, the buffer is cleared and ready to collect new data.
200 */
201 [[nodiscard]] std::span<const T> consume() {
202 BOTAN_ASSERT_NOMSG(ready_to_consume());
203 m_position = 0;
204 return m_buffer;
205 }
206
207 /**
208 * Explicitly consumes however many bytes are currently stored in the
209 * buffer. After consumption, the buffer is cleared and ready to collect
210 * new data.
211 */
212 [[nodiscard]] std::span<const T> consume_partial() {
213 const auto elements = elements_in_buffer();
214 m_position = 0;
215 return std::span(m_buffer).first(elements);
216 }
217
218 constexpr size_t size() const { return m_buffer.size(); }
219
220 size_t elements_in_buffer() const { return m_position; }
221
222 size_t elements_until_alignment() const { return m_buffer.size() - m_position; }
223
224 /**
225 * @returns true if the buffer is empty (i.e. contains no unaligned data)
226 */
227 bool in_alignment() const { return m_position == 0; }
228
229 /**
230 * @returns true if the buffer is full (i.e. a block is ready to be consumed)
231 */
232 bool ready_to_consume() const { return m_position == m_buffer.size(); }
233
234 constexpr bool defers_final_block() const {
235 return FINAL_BLOCK_STRATEGY == AlignmentBufferFinalBlock::must_be_deferred;
236 }
237
238 private:
239 std::array<T, BLOCK_SIZE> m_buffer;
240 size_t m_position;
241};
242
243} // namespace Botan
244
245#endif
#define BOTAN_ASSERT_NOMSG(expr)
Definition assert.h:59
Alignment buffer helper.
AlignmentBuffer & operator=(AlignmentBuffer &&other) noexcept=default
constexpr size_t size() const
AlignmentBuffer(AlignmentBuffer &&other) noexcept=default
std::span< T > directly_modify_first(size_t elements)
constexpr bool defers_final_block() const
AlignmentBuffer & operator=(const AlignmentBuffer &other)=default
size_t elements_in_buffer() const
std::tuple< std::span< const uint8_t >, size_t > aligned_data_to_process(BufferSlicer &slicer) const
std::optional< std::span< const uint8_t > > next_aligned_block_to_process(BufferSlicer &slicer) const
AlignmentBuffer(const AlignmentBuffer &other)=default
void append(std::span< const T > elements)
size_t elements_until_alignment() const
std::optional< std::span< const T > > handle_unaligned_data(BufferSlicer &slicer)
std::span< T > directly_modify_last(size_t elements)
std::span< const T > consume()
std::span< const T > consume_partial()
size_t remaining() const
Definition stl_util.h:127
bool empty() const
Definition stl_util.h:129
std::span< const uint8_t > take(const size_t count)
Definition stl_util.h:98
FE_25519 T
Definition ge.cpp:34
void secure_scrub_memory(void *ptr, size_t n)
Definition os_utils.cpp:89
AlignmentBufferFinalBlock
constexpr void clear_mem(T *ptr, size_t n)
Definition mem_ops.h:120