#include <botan/internal/blake2b.h>

#include <botan/exceptn.h>
#include <botan/mem_ops.h>
#include <botan/internal/fmt.h>
#include <botan/internal/loadstor.h>
#include <botan/internal/rotate.h>
#include <botan/internal/stl_util.h>
// The BLAKE2b IV (identical to the SHA-512 initial hash values)
constexpr std::array<uint64_t, 8> blake2b_IV{
   0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,
   0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179};
BLAKE2b::BLAKE2b(size_t output_bits) :
      m_output_bits(output_bits), m_H(blake2b_IV.size()), m_T(), m_F(), m_key_size(0) {
   // Any byte-aligned output size from 8 to 512 bits is accepted
   if(output_bits == 0 || output_bits > 512 || output_bits % 8 != 0) {
      // ... (invalid sizes are rejected)
void BLAKE2b::state_init() {
   copy_mem(m_H.data(), blake2b_IV.data(), blake2b_IV.size());
   // Fold the parameter block into h[0]: fanout = 1, depth = 1, key length in
   // bytes, and digest length in bytes
   m_H[0] ^= (0x01010000 | (static_cast<uint8_t>(m_key_size) << 8) | static_cast<uint8_t>(output_length()));

   // When a key has been set, the zero-padded key block is fed in as the first
   // message block
   m_buffer.append(m_padded_key_buffer);
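   // Worked example (illustrative only, not from the source): for a 512-bit
   // (64-byte) digest with a 32-byte key the parameter word is
   //    0x01010000 | (32 << 8) | 64 == 0x01012040
   // so h[0] becomes 0x6a09e667f3bcc908 ^ 0x01012040.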
BOTAN_FORCE_INLINE void G(uint64_t& a, uint64_t& b, uint64_t& c, uint64_t& d, uint64_t M0, uint64_t M1) {
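   // The body of G is not included in this listing. As a sketch, the BLAKE2b
   // quarter-round from RFC 7693, written with Botan's rotr<> helper, is:
   //
   //    a += b + M0;
   //    d = rotr<32>(d ^ a);
   //    c += d;
   //    b = rotr<24>(b ^ c);
   //    a += b + M1;
   //    d = rotr<16>(d ^ a);
   //    c += d;
   //    b = rotr<63>(b ^ c);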
   // One round of BLAKE2b (inside the ROUND<> template): a column step over
   // the working vector v, then a diagonal step, each G mixing two message words
   G(v[0], v[4], v[8], v[12], M[i0], M[i1]);
   G(v[1], v[5], v[9], v[13], M[i2], M[i3]);
   G(v[2], v[6], v[10], v[14], M[i4], M[i5]);
   G(v[3], v[7], v[11], v[15], M[i6], M[i7]);
   G(v[0], v[5], v[10], v[15], M[i8], M[i9]);
   G(v[1], v[6], v[11], v[12], M[iA], M[iB]);
   G(v[2], v[7], v[8], v[13], M[iC], M[iD]);
   G(v[3], v[4], v[9], v[14], M[iE], M[iF]);
void BLAKE2b::compress(const uint8_t* input, size_t blocks, uint64_t increment) {
   // M and v below are local uint64_t[16] arrays (message words and working
   // vector), declared in the portion of the function not shown here
   for(size_t b = 0; b != blocks; ++b) {
      // m_T is the 128-bit byte counter; after the low word is advanced by
      // `increment`, carry any overflow into the high word
      if(m_T[0] < increment) {
      // Initialize the working vector: v[0..7] from the chaining value m_H,
      // v[8..15] from the IV
      for(size_t i = 0; i < 8; i++) {
         v[i] = m_H[i];
      }
      for(size_t i = 0; i != 8; ++i) {
         v[i + 8] = blake2b_IV[i];
      }
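      // Not shown in this listing: compress() also loads the 128-byte block as
      // sixteen little-endian words and folds the byte counter and finalization
      // flag into the working vector. A sketch per RFC 7693, assuming Botan's
      // load_le from loadstor.h (the exact local code may differ):
      //
      //    load_le(M, input, 16);
      //    input += BLAKE2B_BLOCKBYTES;
      //    v[12] ^= m_T[0];
      //    v[13] ^= m_T[1];
      //    v[14] ^= m_F;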
      // Twelve rounds; each template argument list is one row of the BLAKE2
      // sigma message schedule, with rounds 10 and 11 repeating rows 0 and 1
      ROUND<0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15>(v, M);
      ROUND<14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3>(v, M);
      ROUND<11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4>(v, M);
      ROUND<7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8>(v, M);
      ROUND<9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13>(v, M);
      ROUND<2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9>(v, M);
      ROUND<12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11>(v, M);
      ROUND<13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10>(v, M);
      ROUND<6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5>(v, M);
      ROUND<10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0>(v, M);
      ROUND<0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15>(v, M);
      ROUND<14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3>(v, M);
      // Feed-forward: xor both halves of the working vector into the chaining value
      for(size_t i = 0; i < 8; i++) {
         m_H[i] ^= v[i] ^ v[i + 8];
      }
   // In add_data(): once the internal buffer is block-aligned, any full blocks
   // are compressed directly from the caller's input
   if(full_blocks > 0) {
   // In final_result(): zero-pad the last block, set the finalization flag, and
   // compress it, counting only the bytes actually buffered (pos); the digest is
   // then copied out little-endian and the state re-initialized
   m_F = 0xFFFFFFFFFFFFFFFF;
   compress(m_buffer.consume().data(), 1, pos);
   // name()
   return fmt("BLAKE2b({})", m_output_bits);

   // new_object()
   return std::make_unique<BLAKE2b>(m_output_bits);

   // copy_state()
   return std::make_unique<BLAKE2b>(*this);

   // has_keying_material()
   return m_key_size > 0;
   // key_schedule(): keep a copy of the key, zero-padded to a full block, so
   // that state_init() can feed it in as the first message block
   m_key_size = key.size();
   m_padded_key_buffer.resize(m_buffer.size());

   if(m_padded_key_buffer.size() > m_key_size) {
      size_t padding = m_padded_key_buffer.size() - m_key_size;
      clear_mem(m_padded_key_buffer.data() + m_key_size, padding);
   }

   copy_mem(m_padded_key_buffer.data(), key.data(), key.size());
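// For context, a minimal sketch of driving this class through Botan's public
// HashFunction interface (names as in Botan 3's <botan/hash.h>; shown here only
// as an illustration, not part of this file):
//
//    #include <botan/hash.h>
//    #include <botan/hex.h>
//    #include <iostream>
//
//    auto blake2b = Botan::HashFunction::create_or_throw("BLAKE2b(256)");
//    blake2b->update("hello world");
//    const auto digest = blake2b->final();           // 32 bytes for BLAKE2b(256)
//    std::cout << Botan::hex_encode(digest) << "\n";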