block-buffer improvements
newpavlov committed Jun 10, 2020
commit 3a083ec09d78c686f7b2eb2ba7e63026ea521b96
10 changes: 3 additions & 7 deletions block-buffer/Cargo.toml
@@ -1,19 +1,15 @@
[package]
name = "block-buffer"
version = "0.8.0"
version = "0.9.0-pre"
authors = ["RustCrypto Developers"]
license = "MIT OR Apache-2.0"
description = "Fixed size buffer for block processing of data"
documentation = "https://docs.rs/block-buffer"
repository = "https://github.com/RustCrypto/utils"
keywords = ["block", "buffer"]
categories = ["cryptography", "no-std"]
edition = "2018"

[dependencies]
byteorder = { version = "1.1", default-features = false }
byte-tools = "0.3"
block-padding = "0.1"
block-padding = { version = "0.1", optional = true }
generic-array = "0.14"

[badges]
travis-ci = { repository = "RustCrypto/utils" }
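With `block-padding` now optional, padding support has to be requested explicitly by downstream crates. A minimal sketch of the opt-in (the dependency line is illustrative, matching the version bump above):

// In the downstream Cargo.toml:
//   block-buffer = { version = "0.9.0-pre", features = ["block-padding"] }

// Without the feature, neither this re-export nor `pad_with` exists:
use block_buffer::block_padding::Pkcs7;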
157 changes: 85 additions & 72 deletions block-buffer/src/lib.rs
@@ -1,13 +1,12 @@
#![no_std]
pub extern crate byteorder;
pub extern crate block_padding;
pub extern crate generic_array;
extern crate byte_tools;
#[cfg(feature = "block-padding")]
pub extern crate block_padding;

use byteorder::{ByteOrder, BE};
use byte_tools::zero;
use block_padding::{Padding, PadError};
use core::{slice, convert::TryInto};
use generic_array::{GenericArray, ArrayLength};
#[cfg(feature = "block-padding")]
use block_padding::{Padding, PadError};

/// Buffer for block processing of data
#[derive(Clone, Default)]
@@ -16,67 +15,63 @@ pub struct BlockBuffer<BlockSize: ArrayLength<u8>> {
pos: usize,
}

#[inline(always)]
unsafe fn cast<N: ArrayLength<u8>>(block: &[u8]) -> &GenericArray<u8, N> {
debug_assert_eq!(block.len(), N::to_usize());
&*(block.as_ptr() as *const GenericArray<u8, N>)
}



impl<BlockSize: ArrayLength<u8>> BlockBuffer<BlockSize> {
/// Process data in `input` in blocks of size `BlockSize` using function `f`.
#[inline]
pub fn input<F>(&mut self, mut input: &[u8], mut f: F)
where F: FnMut(&GenericArray<u8, BlockSize>)
{
// If there is already data in the buffer, process it if we have
// enough to complete the chunk.
let rem = self.remaining();
if self.pos != 0 && input.len() >= rem {
let (l, r) = input.split_at(rem);
pub fn input_block(
&mut self, mut input: &[u8], mut f: impl FnMut(&GenericArray<u8, BlockSize>),
) {
let r = self.remaining();
if input.len() < r {
let n = input.len();
self.buffer[self.pos..self.pos + n].copy_from_slice(input);
self.pos += n;
return;
}
if self.pos != 0 && input.len() >= r {
let (l, r) = input.split_at(r);
input = r;
self.buffer[self.pos..].copy_from_slice(l);
self.pos = 0;
f(&self.buffer);
}

// While we have at least a full buffer size chunk's worth of data,
// process that data without copying it into the buffer
while input.len() >= self.size() {
let (block, r) = input.split_at(self.size());
input = r;
f(unsafe { cast(block) });
let mut chunks_iter = input.chunks_exact(self.size());
for chunk in &mut chunks_iter {
f(chunk.try_into().unwrap());
}
let rem = chunks_iter.remainder();

// Copy any remaining data into the buffer.
self.buffer[self.pos..self.pos+input.len()].copy_from_slice(input);
self.pos += input.len();
self.buffer[..rem.len()].copy_from_slice(rem);
self.pos = rem.len();
}
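A quick usage sketch of the reworked single-block input path (block size and byte counts here are illustrative, not from the diff):

use block_buffer::BlockBuffer;
use block_buffer::generic_array::typenum::U64;

fn main() {
    let mut buf: BlockBuffer<U64> = Default::default();
    let mut blocks = 0;
    // 150 bytes: two full 64-byte blocks are handed to the closure,
    // the remaining 22 bytes stay buffered
    buf.input_block(&[0u8; 150], |_block| blocks += 1);
    assert_eq!(blocks, 2);
}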

/*
/// Process data in `input` in blocks of size `BlockSize` using function `f`, which accepts
/// slice of blocks.
#[inline]
pub fn input2<F>(&mut self, mut input: &[u8], mut f: F)
where F: FnMut(&[GenericArray<u8, BlockSize>])
{
// If there is already data in the buffer, process it if we have
// enough to complete the chunk.
let rem = self.remaining();
if self.pos != 0 && input.len() >= rem {
let (l, r) = input.split_at(rem);
pub fn input_blocks(
&mut self, mut input: &[u8], mut f: impl FnMut(&[GenericArray<u8, BlockSize>]),
) {
let r = self.remaining();
if input.len() < r {
let n = input.len();
self.buffer[self.pos..self.pos + n].copy_from_slice(input);
self.pos += n;
return;
}
if self.pos != 0 && input.len() >= r {
let (l, r) = input.split_at(r);
input = r;
self.buffer[self.pos..].copy_from_slice(l);
self.pos = 0;
f(slice::from_ref(&self.buffer));
}

// While we have at least a full buffer size chunk's worth of data,
// process it data without copying into the buffer
// process its data without copying into the buffer
let n_blocks = input.len()/self.size();
let (left, right) = input.split_at(n_blocks*self.size());
// safe because we guarantee that `blocks` does not point outside of `input`
// SAFETY: we guarantee that `blocks` does not point outside of `input`
let blocks = unsafe {
slice::from_raw_parts(
left.as_ptr() as *const GenericArray<u8, BlockSize>,
@@ -86,10 +81,10 @@ impl<BlockSize: ArrayLength<u8>> BlockBuffer<BlockSize> {
f(blocks);

// Copy remaining data into the buffer.
self.buffer[self.pos..self.pos+right.len()].copy_from_slice(right);
self.pos += right.len();
let n = right.len();
self.buffer[..n].copy_from_slice(right);
self.pos = n;
}
*/
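And the multi-block variant, which hands over as many full blocks as possible in a single call (again an illustrative sketch):

use block_buffer::BlockBuffer;
use block_buffer::generic_array::typenum::U64;

fn main() {
    let mut buf: BlockBuffer<U64> = Default::default();
    let mut total = 0;
    // the closure sees one slice containing both full blocks at once
    buf.input_blocks(&[0u8; 150], |blocks| total += blocks.len());
    assert_eq!(total, 2);
}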

/// Variant that doesn't flush the buffer until there's additional
/// data to be processed. Suitable for tweakable block ciphers
@@ -99,73 +94,90 @@ impl<BlockSize: ArrayLength<u8>> BlockBuffer<BlockSize> {
pub fn input_lazy<F>(&mut self, mut input: &[u8], mut f: F)
where F: FnMut(&GenericArray<u8, BlockSize>)
{
let rem = self.remaining();
if self.pos != 0 && input.len() > rem {
let (l, r) = input.split_at(rem);
let r = self.remaining();
if input.len() <= r {
let n = input.len();
self.buffer[self.pos..self.pos + n].copy_from_slice(input);
self.pos += n;
return;
}
if self.pos != 0 && input.len() > r {
let (l, r) = input.split_at(r);
input = r;
self.buffer[self.pos..].copy_from_slice(l);
self.pos = 0;
f(&self.buffer);
}

while input.len() > self.size() {
let (block, r) = input.split_at(self.size());
input = r;
f(unsafe { cast(block) });
f(block.try_into().unwrap());
}

self.buffer[self.pos..self.pos+input.len()].copy_from_slice(input);
self.pos += input.len();
self.buffer[..input.len()].copy_from_slice(input);
self.pos = input.len();
}
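The lazy variant keeps a completely filled buffer unflushed until more data arrives, which the strict `<=`/`>` comparisons above implement. An illustrative contrast:

use block_buffer::BlockBuffer;
use block_buffer::generic_array::typenum::U64;

fn main() {
    let mut buf: BlockBuffer<U64> = Default::default();
    let mut calls = 0;
    // exactly one block of input: input_block would flush it,
    // input_lazy keeps it buffered for a later tweak/padding step
    buf.input_lazy(&[0u8; 64], |_block| calls += 1);
    assert_eq!(calls, 0);
}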

/// Pad the buffer with 0x80 and make sure that the internal buffer
/// has at least `up_to` free bytes. All remaining bytes get
/// zeroed out.
#[inline]
fn digest_pad<F>(&mut self, up_to: usize, f: &mut F)
where F: FnMut(&GenericArray<u8, BlockSize>)
{
fn digest_pad(
&mut self, up_to: usize, mut f: impl FnMut(&GenericArray<u8, BlockSize>),
) {
if self.pos == self.size() {
f(&self.buffer);
self.pos = 0;
}
self.buffer[self.pos] = 0x80;
self.pos += 1;

zero(&mut self.buffer[self.pos..]);
self.buffer[self.pos..].iter_mut().for_each(|b| *b = 0);

if self.remaining() < up_to {
f(&self.buffer);
zero(&mut self.buffer[..self.pos]);
self.buffer[..self.pos].iter_mut().for_each(|b| *b = 0);
}
}

/// Pad message with 0x80, zeros and 64-bit message length
/// in a byte order specified by `B`
/// using big-endian byte order
#[inline]
pub fn len64_padding<B, F>(&mut self, data_len: u64, mut f: F)
where B: ByteOrder, F: FnMut(&GenericArray<u8, BlockSize>)
{
// TODO: replace `F` with `impl Trait` on MSRV bump
pub fn len64_padding_be(
&mut self, data_len: u64, mut f: impl FnMut(&GenericArray<u8, BlockSize>),
) {
self.digest_pad(8, &mut f);
let s = self.size();
B::write_u64(&mut self.buffer[s-8..], data_len);
let b = data_len.to_be_bytes();
let n = self.buffer.len() - b.len();
self.buffer[n..].copy_from_slice(&b);
f(&self.buffer);
self.pos = 0;
}

/// Pad message with 0x80, zeros and 64-bit message length
/// using little-endian byte order
#[inline]
pub fn len64_padding_le(
&mut self, data_len: u64, mut f: impl FnMut(&GenericArray<u8, BlockSize>),
) {
self.digest_pad(8, &mut f);
let b = data_len.to_le_bytes();
let n = self.buffer.len() - b.len();
self.buffer[n..].copy_from_slice(&b);
f(&self.buffer);
self.pos = 0;
}
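A finalization sketch using the new big-endian variant (the bits-vs-bytes length convention is the caller's, as in SHA-2; values are illustrative). `len128_padding_be` follows the same pattern with a 16-byte length field:

use block_buffer::BlockBuffer;
use block_buffer::generic_array::typenum::U64;

fn main() {
    let mut buf: BlockBuffer<U64> = Default::default();
    buf.input_block(b"abc", |_block| {});
    let mut final_blocks = 0;
    // 3 message bytes + 0x80 + zeros + 8 length bytes fit in one block
    buf.len64_padding_be(24, |_block| final_blocks += 1);
    assert_eq!(final_blocks, 1);
}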

/// Pad message with 0x80, zeros and 128-bit message length
/// in the big-endian byte order
/// using big-endian byte order
#[inline]
pub fn len128_padding_be<F>(&mut self, hi: u64, lo: u64, mut f: F)
where F: FnMut(&GenericArray<u8, BlockSize>)
{
// TODO: on MSRV bump replace `F` with `impl Trait`, use `u128`, add `B`
pub fn len128_padding_be(
&mut self, data_len: u128, mut f: impl FnMut(&GenericArray<u8, BlockSize>),
) {
self.digest_pad(16, &mut f);
let s = self.size();
BE::write_u64(&mut self.buffer[s-16..s-8], hi);
BE::write_u64(&mut self.buffer[s-8..], lo);
let b = data_len.to_be_bytes();
let n = self.buffer.len() - b.len();
self.buffer[n..].copy_from_slice(&b);
f(&self.buffer);
self.pos = 0;
}
@@ -174,6 +186,7 @@ impl<BlockSize: ArrayLength<u8>> BlockBuffer<BlockSize> {
///
/// Returns `PadError` if the internal buffer is full, which can only happen if
/// `input_lazy` was used.
#[cfg(feature = "block-padding")]
#[inline]
pub fn pad_with<P: Padding>(&mut self)
-> Result<&mut GenericArray<u8, BlockSize>, PadError>
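A hedged sketch of `pad_with` under the new feature gate (padding scheme and block size are illustrative; `Pkcs7` comes from the re-exported `block_padding` crate):

use block_buffer::block_padding::Pkcs7;
use block_buffer::generic_array::typenum::U16;
use block_buffer::BlockBuffer;

fn main() {
    let mut buf: BlockBuffer<U16> = Default::default();
    buf.input_block(&[0xff; 3], |_block| {});
    // PKCS#7 fills the 13 remaining bytes with the value 13; an error
    // is only possible if the buffer is full, e.g. after input_lazy
    let block = buf.pad_with::<Pkcs7>().expect("buffer not full");
    assert_eq!(block[3], 13);
}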