Skip to content

Commit 04fac30

Browse files
committed
Document that BFloat16 is not an IEEE-754 16-bit float
Fixes tensorflow#233
1 parent 1a514dc commit 04fac30

File tree

1 file changed

+8
-0
lines changed

1 file changed

+8
-0
lines changed

src/lib.rs

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -352,6 +352,10 @@ c_enum!("Type of a single tensor element.", TF_DataType, DataType {
352352
value QInt32 = 13,
353353

354354
/// Float32 truncated to 16 bits. Only for cast ops.
355+
/// Note that this is not the same as Half. BFloat16 is not an IEEE-754
356+
/// 16-bit float. See
357+
/// https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/bfloat16.h
358+
/// for details.
355359
value BFloat16 = 14,
356360

357361
/// Quantized 16-bit signed integer.
@@ -728,6 +732,10 @@ q_type!(i32,
728732
////////////////////////
729733

730734
/// BFloat16 provides a Rust type for BFloat16.
735+
/// Note that this is not the same as half::f16. BFloat16 is not an IEEE-754
736+
/// 16-bit float. See
737+
/// https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/bfloat16.h
738+
/// for details.
731739
#[derive(Debug, Clone, Copy, Default)]
732740
pub struct BFloat16(u16);
733741

0 commit comments

Comments (0)